-rw-r--r--  MAINTAINERS | 11
-rw-r--r--  app/test-pmd/cmdline_tm.c | 12
-rw-r--r--  config/common_base | 2
-rw-r--r--  doc/guides/contributing/stable.rst | 6
-rw-r--r--  doc/guides/contributing/versioning.rst | 19
-rw-r--r--  doc/guides/eventdevs/octeontx.rst | 28
-rw-r--r--  doc/guides/index.rst | 2
-rw-r--r--  doc/guides/mempool/index.rst | 40
-rw-r--r--  doc/guides/mempool/octeontx.rst | 104
-rw-r--r--  doc/guides/nics/features.rst | 38
-rw-r--r--  doc/guides/nics/features/default.ini | 3
-rw-r--r--  doc/guides/nics/features/ixgbe.ini | 1
-rw-r--r--  doc/guides/nics/features/ixgbe_vec.ini | 1
-rw-r--r--  doc/guides/nics/features/ixgbe_vf.ini | 1
-rw-r--r--  doc/guides/nics/features/ixgbe_vf_vec.ini | 1
-rw-r--r--  doc/guides/nics/ixgbe.rst | 16
-rw-r--r--  doc/guides/nics/mlx4.rst | 114
-rw-r--r--  doc/guides/nics/octeontx.rst | 31
-rw-r--r--  doc/guides/nics/qede.rst | 59
-rw-r--r--  doc/guides/platform/index.rst | 39
-rw-r--r--  doc/guides/platform/octeontx.rst | 81
-rw-r--r--  doc/guides/prog_guide/env_abstraction_layer.rst | 2
-rw-r--r--  doc/guides/rel_notes/deprecation.rst | 18
-rw-r--r--  doc/guides/rel_notes/release_17_11.rst | 569
-rw-r--r--  doc/guides/sample_app_ug/ip_pipeline.rst | 2
-rw-r--r--  doc/guides/sample_app_ug/ipv4_multicast.rst | 2
-rw-r--r--  drivers/bus/pci/bsd/pci.c | 2
-rw-r--r--  drivers/net/ark/ark_ethdev_rx.c | 2
-rw-r--r--  drivers/net/bnxt/bnxt.h | 2
-rw-r--r--  drivers/net/bnxt/bnxt_cpr.c | 2
-rw-r--r--  drivers/net/bnxt/bnxt_ethdev.c | 68
-rw-r--r--  drivers/net/bnxt/bnxt_hwrm.c | 63
-rw-r--r--  drivers/net/i40e/i40e_ethdev_vf.c | 3
-rw-r--r--  drivers/net/i40e/i40e_rxtx.c | 9
-rw-r--r--  drivers/net/ixgbe/ixgbe_rxtx.c | 2
-rw-r--r--  drivers/net/liquidio/base/lio_23xx_vf.c | 19
-rw-r--r--  drivers/net/liquidio/base/lio_23xx_vf.h | 2
-rw-r--r--  drivers/net/liquidio/base/lio_hw_defs.h | 3
-rw-r--r--  drivers/net/liquidio/base/lio_mbox.h | 1
-rw-r--r--  drivers/net/liquidio/lio_ethdev.c | 12
-rw-r--r--  drivers/net/mlx4/mlx4_ethdev.c | 19
-rw-r--r--  drivers/net/mlx4/mlx4_intr.c | 2
-rw-r--r--  drivers/net/mlx4/mlx4_rxq.c | 1
-rw-r--r--  drivers/net/mlx4/mlx4_rxtx.c | 21
-rw-r--r--  drivers/net/mlx4/mlx4_rxtx.h | 1
-rw-r--r--  drivers/net/mlx5/mlx5_ethdev.c | 50
-rw-r--r--  drivers/net/mlx5/mlx5_flow.c | 5
-rw-r--r--  drivers/net/mlx5/mlx5_rxq.c | 2
-rw-r--r--  drivers/net/mlx5/mlx5_utils.h | 4
-rw-r--r--  drivers/net/nfp/nfp_net.c | 2
-rw-r--r--  drivers/net/nfp/nfp_nfpu.c | 5
-rw-r--r--  drivers/net/nfp/nfp_nspu.c | 23
-rw-r--r--  drivers/net/qede/qede_ethdev.c | 5
-rw-r--r--  drivers/net/qede/qede_rxtx.c | 4
-rw-r--r--  drivers/net/softnic/rte_eth_softnic_tm.c | 40
-rw-r--r--  drivers/net/szedata2/rte_eth_szedata2.c | 2
-rw-r--r--  drivers/net/virtio/virtio_ethdev.c | 54
-rw-r--r--  drivers/net/virtio/virtio_pci.c | 43
-rw-r--r--  drivers/net/virtio/virtio_pci.h | 8
-rw-r--r--  examples/ipsec-secgw/ipsec.c | 4
-rw-r--r--  examples/ipv4_multicast/main.c | 2
-rw-r--r--  lib/librte_distributor/rte_distributor.c | 6
-rw-r--r--  lib/librte_distributor/rte_distributor.h | 2
-rw-r--r--  lib/librte_distributor/rte_distributor_private.h | 2
-rw-r--r--  lib/librte_distributor/rte_distributor_v20.c | 3
-rw-r--r--  lib/librte_eal/common/eal_common_log.c | 2
-rw-r--r--  lib/librte_eal/common/include/arch/arm/rte_memcpy_32.h | 2
-rw-r--r--  lib/librte_eal/common/include/arch/ppc_64/rte_memcpy.h | 2
-rw-r--r--  lib/librte_eal/common/include/rte_eal.h | 2
-rw-r--r--  lib/librte_eal/common/include/rte_log.h | 2
-rw-r--r--  lib/librte_eal/common/include/rte_random.h | 2
-rw-r--r--  lib/librte_eal/common/include/rte_version.h | 2
-rw-r--r--  lib/librte_eal/common/malloc_elem.c | 2
-rw-r--r--  lib/librte_eal/common/rte_service.c | 2
-rw-r--r--  lib/librte_eal/linuxapp/eal/eal_memory.c | 2
-rw-r--r--  lib/librte_eal/linuxapp/eal/eal_timer.c | 2
-rw-r--r--  lib/librte_eal/linuxapp/kni/ethtool/igb/e1000_82575.c | 2
-rw-r--r--  lib/librte_efd/rte_efd.c | 2
-rw-r--r--  lib/librte_ether/rte_ethdev.h | 2
-rw-r--r--  lib/librte_ether/rte_tm_driver.h | 2
-rw-r--r--  lib/librte_gro/gro_tcp4.h | 2
-rw-r--r--  lib/librte_gso/rte_gso.h | 2
-rw-r--r--  lib/librte_ip_frag/ip_frag_internal.c | 2
-rw-r--r--  lib/librte_ip_frag/rte_ip_frag.h | 6
-rw-r--r--  lib/librte_ip_frag/rte_ipv4_reassembly.c | 4
-rw-r--r--  lib/librte_jobstats/rte_jobstats.h | 2
-rw-r--r--  lib/librte_kni/rte_kni.c | 4
-rw-r--r--  lib/librte_kni/rte_kni.h | 2
-rw-r--r--  lib/librte_kni/rte_kni_fifo.h | 2
-rw-r--r--  lib/librte_mbuf/rte_mbuf.h | 6
-rw-r--r--  lib/librte_net/net_crc_neon.h | 2
-rw-r--r--  lib/librte_net/net_crc_sse.h | 2
-rw-r--r--  lib/librte_net/rte_ip.h | 2
-rw-r--r--  lib/librte_pdump/rte_pdump.c | 10
-rw-r--r--  lib/librte_pipeline/rte_pipeline.h | 2
-rw-r--r--  lib/librte_power/rte_power_acpi_cpufreq.c | 4
-rw-r--r--  lib/librte_power/rte_power_acpi_cpufreq.h | 2
-rw-r--r--  lib/librte_reorder/rte_reorder.h | 4
-rw-r--r--  lib/librte_ring/rte_ring.h | 12
-rw-r--r--  lib/librte_sched/rte_red.h | 4
-rw-r--r--  lib/librte_sched/rte_sched.c | 2
-rw-r--r--  lib/librte_security/rte_security.h | 3
-rw-r--r--  lib/librte_timer/rte_timer.c | 2
-rw-r--r--  test/test/autotest_test_funcs.py | 37
-rw-r--r--  test/test/test_memzone.c | 135
-rwxr-xr-x  usertools/dpdk-devbind.py | 2
106 files changed, 1419 insertions, 570 deletions
diff --git a/MAINTAINERS b/MAINTAINERS
index 8d45ad0b..6ed2277c 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -247,11 +247,13 @@ F: test/test/test_mbuf.c
Ethernet API
M: Thomas Monjalon <thomas@monjalon.net>
+T: git://dpdk.org/next/dpdk-next-net
F: lib/librte_ether/
F: devtools/test-null.sh
Flow API
M: Adrien Mazarguil <adrien.mazarguil@6wind.com>
+T: git://dpdk.org/next/dpdk-next-net
F: lib/librte_ether/rte_flow*
Traffic Management API - EXPERIMENTAL
@@ -265,6 +267,7 @@ F: lib/librte_ether/rte_mtr*
Crypto API
M: Declan Doherty <declan.doherty@intel.com>
+T: git://dpdk.org/next/dpdk-next-crypto
F: lib/librte_cryptodev/
F: test/test/test_cryptodev*
F: examples/l2fwd-crypto/
@@ -297,6 +300,7 @@ PCI bus driver
F: drivers/bus/pci/
VDEV bus driver
+M: Jianfeng Tan <jianfeng.tan@intel.com>
F: drivers/bus/vdev/
@@ -412,13 +416,13 @@ F: doc/guides/nics/intel_vf.rst
F: doc/guides/nics/features/i40e*.ini
Intel fm10k
-M: Jing Chen <jing.d.chen@intel.com>
+M: Qi Zhang <qi.z.zhang@intel.com>
+M: Xiao Wang <xiao.w.wang@intel.com>
F: drivers/net/fm10k/
F: doc/guides/nics/features/fm10k*.ini
Mellanox mlx4
M: Adrien Mazarguil <adrien.mazarguil@6wind.com>
-M: Nelio Laranjeiro <nelio.laranjeiro@6wind.com>
F: drivers/net/mlx4/
F: doc/guides/nics/mlx4.rst
F: doc/guides/nics/features/mlx4.ini
@@ -878,7 +882,7 @@ F: examples/timer/
F: doc/guides/sample_app_ug/timer.rst
Job statistics
-M: Pawel Wodkowski <pawelx.wodkowski@intel.com>
+M: Pablo de Lara <pablo.de.lara.guarch@intel.com>
F: lib/librte_jobstats/
F: examples/l2fwd-jobstats/
F: doc/guides/sample_app_ug/l2_forward_job_stats.rst
@@ -996,7 +1000,6 @@ F: examples/performance-thread/
F: doc/guides/sample_app_ug/performance_thread.rst
M: Pablo de Lara <pablo.de.lara.guarch@intel.com>
-M: Daniel Mrzyglod <danielx.t.mrzyglod@intel.com>
F: examples/ptpclient/
F: examples/quota_watermark/
diff --git a/app/test-pmd/cmdline_tm.c b/app/test-pmd/cmdline_tm.c
index 4acef98f..803fae44 100644
--- a/app/test-pmd/cmdline_tm.c
+++ b/app/test-pmd/cmdline_tm.c
@@ -1599,12 +1599,6 @@ static void cmd_add_port_tm_nonleaf_node_parsed(void *parsed_result,
if (port_id_is_invalid(port_id, ENABLED_WARN))
return;
- /* Port status */
- if (port_is_started(port_id)) {
- printf(" Port %u not stopped (error)\n", port_id);
- return;
- }
-
memset(&np, 0, sizeof(struct rte_tm_node_params));
/* Node parameters */
@@ -1759,12 +1753,6 @@ static void cmd_add_port_tm_leaf_node_parsed(void *parsed_result,
if (port_id_is_invalid(port_id, ENABLED_WARN))
return;
- /* Port status */
- if (port_is_started(port_id)) {
- printf(" Port %u not stopped (error)\n", port_id);
- return;
- }
-
memset(&np, 0, sizeof(struct rte_tm_node_params));
/* Node parameters */
diff --git a/config/common_base b/config/common_base
index 34f04a9f..e74febef 100644
--- a/config/common_base
+++ b/config/common_base
@@ -415,7 +415,7 @@ CONFIG_RTE_LIBRTE_QEDE_DEBUG_INFO=n
CONFIG_RTE_LIBRTE_QEDE_DEBUG_DRIVER=n
CONFIG_RTE_LIBRTE_QEDE_DEBUG_TX=n
CONFIG_RTE_LIBRTE_QEDE_DEBUG_RX=n
-CONFIG_RTE_LIBRTE_QEDE_VF_TX_SWITCH=n
+CONFIG_RTE_LIBRTE_QEDE_VF_TX_SWITCH=y
#Provides abs path/name of the firmware file.
#Empty string denotes driver will use default firmware
CONFIG_RTE_LIBRTE_QEDE_FW=""
diff --git a/doc/guides/contributing/stable.rst b/doc/guides/contributing/stable.rst
index d52ec477..0f2f1f37 100644
--- a/doc/guides/contributing/stable.rst
+++ b/doc/guides/contributing/stable.rst
@@ -42,10 +42,10 @@ LTS Release
-----------
A stable release can be designated as an LTS release based on community
-agreement and a commitment from a maintainer. An LTS release will have a
-maintenance duration of 2 years.
+agreement and a commitment from a maintainer. The current policy is that each
+year's November release will be maintained as an LTS for 2 years.
-The current DPDK LTS release is 16.11.
+The current DPDK LTS releases are 16.11 and 17.11.
It is anticipated that there will be at least 4 releases per year of the LTS
or approximately 1 every 3 months. However, the cadence can be shorter or
diff --git a/doc/guides/contributing/versioning.rst b/doc/guides/contributing/versioning.rst
index 8d0fdb77..40009062 100644
--- a/doc/guides/contributing/versioning.rst
+++ b/doc/guides/contributing/versioning.rst
@@ -13,7 +13,9 @@ General Guidelines
------------------
#. Whenever possible, ABI should be preserved
-#. The libraries marked in experimental state may change without constraint.
+#. Libraries or APIs marked in ``experimental`` state may change without constraint.
+#. New APIs will be marked as ``experimental`` for at least one release to allow
+ any issues found by users of the new API to be fixed quickly
#. The addition of symbols is generally not problematic
#. The modification of symbols can generally be managed with versioning
#. The removal of symbols generally is an ABI break and requires bumping of the
@@ -41,6 +43,13 @@ ABI versions are set at the time of major release labeling, and the ABI may
change multiple times, without warning, between the last release label and the
HEAD label of the git tree.
+APIs marked as ``experimental`` are not considered part of the ABI and may
+change without warning at any time. Since changes to APIs are most likely
+immediately after their introduction, as users begin to take advantage of
+those new APIs and start finding issues with them, new DPDK APIs will be
+automatically marked as ``experimental`` to allow for a period of stabilization
+before they become part of a tracked ABI.
+
ABI versions, once released, are available until such time as their
deprecation has been noted in the Release Notes for at least one major release
cycle. For example consider the case where the ABI for DPDK 2.0 has been
@@ -58,6 +67,14 @@ being provided. The requirements for doing so are:
#. At least 3 acknowledgments of the need to do so must be made on the
dpdk.org mailing list.
+ - The acknowledgment of the maintainer of the component is mandatory, or if
+ no maintainer is available for the component, the tree/sub-tree maintainer
+ for that component must acknowledge the ABI change instead.
+
+ - It is also recommended that acknowledgments from different "areas of
+ interest" be sought for each deprecation, for example: from NIC vendors,
+ CPU vendors, end-users, etc.
+
#. The changes (including an alternative map file) must be gated with
the ``RTE_NEXT_ABI`` option, and provided with a deprecation notice at the
same time.
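
The policy text above assumes familiarity with DPDK's symbol versioning mechanism. As a condensed illustration (not part of this patch; the ``rte_foo_*`` names and version numbers are placeholders), the ``rte_compat.h`` helpers let a library carry an old and a new implementation of the same function side by side, with matching version nodes listed in the library's map file:

.. code-block:: c

   #include <rte_compat.h>

   struct rte_foo { int debug; };  /* placeholder type for illustration only */

   /* Original implementation: binaries built against the 2.0 ABI keep
    * resolving rte_foo_configure to this symbol.
    */
   int
   rte_foo_configure_v20(struct rte_foo *foo)
   {
           foo->debug = 0;
           return 0;
   }
   VERSION_SYMBOL(rte_foo_configure, _v20, 2.0);

   /* New implementation with an extra parameter, bound as the default
    * symbol for applications built against the 2.1 ABI.
    */
   int
   rte_foo_configure_v21(struct rte_foo *foo, int debug)
   {
           foo->debug = debug;
           return 0;
   }
   BIND_DEFAULT_SYMBOL(rte_foo_configure, _v21, 2.1);

   /* Static builds bypass the dynamic linker, so map the plain name to
    * the newest implementation as well.
    */
   MAP_STATIC_SYMBOL(int rte_foo_configure(struct rte_foo *foo, int debug),
                     rte_foo_configure_v21);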
diff --git a/doc/guides/eventdevs/octeontx.rst b/doc/guides/eventdevs/octeontx.rst
index 7e601a07..cef004a2 100644
--- a/doc/guides/eventdevs/octeontx.rst
+++ b/doc/guides/eventdevs/octeontx.rst
@@ -63,33 +63,7 @@ Supported OCTEONTX SoCs
Prerequisites
-------------
-There are three main pre-perquisites for executing SSOVF PMD on a OCTEONTX
-compatible board:
-
-1. **OCTEONTX Linux kernel PF driver for Network acceleration HW blocks**
-
- The OCTEONTX Linux kernel drivers (including the required PF driver for the
- SSOVF) are available on Github at `octeontx-kmod <https://github.com/caviumnetworks/octeontx-kmod>`_
- along with build, install and dpdk usage instructions.
-
-2. **ARM64 Tool Chain**
-
- For example, the *aarch64* Linaro Toolchain, which can be obtained from
- `here <https://releases.linaro.org/components/toolchain/binaries/4.9-2017.01/aarch64-linux-gnu>`_.
-
-3. **Rootfile system**
-
- Any *aarch64* supporting filesystem can be used. For example,
- Ubuntu 15.10 (Wily) or 16.04 LTS (Xenial) userland which can be obtained
- from `<http://cdimage.ubuntu.com/ubuntu-base/releases/16.04/release/ubuntu-base-16.04.1-base-arm64.tar.gz>`_.
-
- As an alternative method, SSOVF PMD can also be executed using images provided
- as part of SDK from Cavium. The SDK includes all the above prerequisites necessary
- to bring up a OCTEONTX board.
-
- SDK and related information can be obtained from: `Cavium support site <https://support.cavium.com/>`_.
-
-- Follow the DPDK :ref:`Getting Started Guide for Linux <linux_gsg>` to setup the basic DPDK environment.
+See :doc:`../platform/octeontx` for setup information.
Pre-Installation Configuration
------------------------------
diff --git a/doc/guides/index.rst b/doc/guides/index.rst
index 5b6eb7ec..f924a7cb 100644
--- a/doc/guides/index.rst
+++ b/doc/guides/index.rst
@@ -44,6 +44,8 @@ DPDK documentation
nics/index
cryptodevs/index
eventdevs/index
+ mempool/index
+ platform/index
contributing/index
rel_notes/index
faq/index
diff --git a/doc/guides/mempool/index.rst b/doc/guides/mempool/index.rst
new file mode 100644
index 00000000..b3c8e7f0
--- /dev/null
+++ b/doc/guides/mempool/index.rst
@@ -0,0 +1,40 @@
+.. BSD LICENSE
+ Copyright(c) 2017 Cavium Inc. All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+ * Neither the name of Cavium Inc nor the names of its
+ contributors may be used to endorse or promote products derived
+ from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+Mempool Device Driver
+=====================
+
+The following is a list of mempool PMDs, which can be used from an
+application through the mempool API.
+
+.. toctree::
+ :maxdepth: 2
+ :numbered:
+
+ octeontx
diff --git a/doc/guides/mempool/octeontx.rst b/doc/guides/mempool/octeontx.rst
new file mode 100644
index 00000000..b262c823
--- /dev/null
+++ b/doc/guides/mempool/octeontx.rst
@@ -0,0 +1,104 @@
+.. BSD LICENSE
+ Copyright (C) Cavium, Inc. 2017. All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+ * Neither the name of Cavium, Inc nor the names of its
+ contributors may be used to endorse or promote products derived
+ from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+OCTEONTX FPAVF Mempool Driver
+=============================
+
+The OCTEONTX FPAVF PMD (**librte_mempool_octeontx**) is a mempool
+driver for the offload mempool device found in the **Cavium OCTEONTX** SoC
+family.
+
+More information can be found at `Cavium, Inc Official Website
+<http://www.cavium.com/OCTEON-TX_ARM_Processors.html>`_.
+
+Features
+--------
+
+Features of the OCTEONTX FPAVF PMD are:
+
+- 32 SR-IOV Virtual functions
+- 32 Pools
+- HW mempool manager
+
+Supported OCTEONTX SoCs
+-----------------------
+
+- CN83xx
+
+Prerequisites
+-------------
+
+See :doc:`../platform/octeontx` for setup information.
+
+Pre-Installation Configuration
+------------------------------
+
+Config File Options
+~~~~~~~~~~~~~~~~~~~
+
+The following options can be modified in the ``config`` file.
+Please note that enabling debugging options may affect system performance.
+
+- ``CONFIG_RTE_MBUF_DEFAULT_MEMPOOL_OPS`` (set to ``octeontx_fpavf``)
+
+ Set default mempool ops to octeontx_fpavf.
+
+- ``CONFIG_RTE_LIBRTE_OCTEONTX_MEMPOOL`` (default ``y``)
+
+ Toggle compilation of the ``librte_mempool_octeontx`` driver.
+
+- ``CONFIG_RTE_LIBRTE_OCTEONTX_MEMPOOL_DEBUG`` (default ``n``)
+
+ Toggle display of generic debugging messages
+
+Driver Compilation
+~~~~~~~~~~~~~~~~~~
+
+To compile the OCTEONTX FPAVF MEMPOOL PMD for Linux arm64 gcc target, run the
+following ``make`` command:
+
+.. code-block:: console
+
+ cd <DPDK-source-directory>
+ make config T=arm64-thunderx-linuxapp-gcc test-build
+
+
+Initialization
+--------------
+
+The octeontx fpavf mempool initialization is similar to that of other mempool
+drivers, such as ring. However, the user needs to pass the ``--base-virtaddr``
+option on the application command line, as shown in the example below for the
+test application.
+
+Example:
+
+.. code-block:: console
+
+ ./build/app/test -c 0xf --base-virtaddr=0x100000000000 \
+ --mbuf-pool-ops-name="octeontx_fpavf"
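
Besides selecting the handler on the command line as in the example above, an application can bind the ``octeontx_fpavf`` ops to a pool directly through the mempool API. The sketch below is illustrative only (pool name, sizes and error handling are arbitrary) and mirrors what ``rte_pktmbuf_pool_create()`` does internally, with the ops name swapped in; the ``--base-virtaddr`` requirement noted above still applies:

.. code-block:: c

   #include <rte_mbuf.h>
   #include <rte_mempool.h>

   #define NB_MBUFS   8192
   #define CACHE_SIZE 256

   static struct rte_mempool *
   octeontx_pktmbuf_pool_create(int socket_id)
   {
           struct rte_pktmbuf_pool_private priv = {
                   .mbuf_data_room_size = RTE_MBUF_DEFAULT_BUF_SIZE,
                   .mbuf_priv_size = 0,
           };
           struct rte_mempool *mp;

           mp = rte_mempool_create_empty("octeontx_pool", NB_MBUFS,
                           sizeof(struct rte_mbuf) + RTE_MBUF_DEFAULT_BUF_SIZE,
                           CACHE_SIZE, sizeof(priv), socket_id, 0);
           if (mp == NULL)
                   return NULL;

           /* Use the FPA hardware pool manager instead of the default ops. */
           if (rte_mempool_set_ops_byname(mp, "octeontx_fpavf", NULL) != 0)
                   goto error;

           rte_pktmbuf_pool_init(mp, &priv);

           if (rte_mempool_populate_default(mp) < 0)
                   goto error;

           /* Initialize every element as a packet mbuf. */
           rte_mempool_obj_iter(mp, rte_pktmbuf_init, NULL);
           return mp;

   error:
           rte_mempool_free(mp);
           return NULL;
   }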
diff --git a/doc/guides/nics/features.rst b/doc/guides/nics/features.rst
index bfeae80e..d5bf38a2 100644
--- a/doc/guides/nics/features.rst
+++ b/doc/guides/nics/features.rst
@@ -136,6 +136,18 @@ invoke rte_eth_tx_burst() concurrently on the same Tx queue without SW lock.
* **[related] API**: ``rte_eth_tx_burst()``.
+.. _nic_features_fast_mbuf_free:
+
+Fast mbuf free
+--------------
+
+Supports optimization for fast release of mbufs following successful Tx.
+Requires that, per queue, all mbufs come from the same mempool and have refcnt = 1.
+
+* **[uses] rte_eth_txconf,rte_eth_txmode**: ``offloads:DEV_TX_OFFLOAD_MBUF_FAST_FREE``.
+* **[provides] rte_eth_dev_info**: ``tx_offload_capa,tx_queue_offload_capa:DEV_TX_OFFLOAD_MBUF_FAST_FREE``.
+
+
.. _nic_features_free_tx_mbuf_on_demand:
Free Tx mbuf on demand
@@ -494,6 +506,23 @@ Supports adding traffic mirroring rules.
* **[related] API**: ``rte_eth_mirror_rule_set()``, ``rte_eth_mirror_rule_reset()``.
+.. _nic_features_inline_crypto_doc:
+
+Inline crypto
+-------------
+
+Supports inline crypto processing (e.g. inline IPsec). See the Security library and PMD documentation for more details.
+
+* **[uses] rte_eth_rxconf,rte_eth_rxmode**: ``offloads:DEV_RX_OFFLOAD_SECURITY``,
+* **[uses] rte_eth_txconf,rte_eth_txmode**: ``offloads:DEV_TX_OFFLOAD_SECURITY``.
+* **[implements] rte_security_ops**: ``session_create``, ``session_update``,
+ ``session_stats_get``, ``session_destroy``, ``set_pkt_metadata``, ``capabilities_get``.
+* **[provides] rte_eth_dev_info**: ``rx_offload_capa,rx_queue_offload_capa:DEV_RX_OFFLOAD_SECURITY``,
+ ``tx_offload_capa,tx_queue_offload_capa:DEV_TX_OFFLOAD_SECURITY``.
+* **[provides] mbuf**: ``mbuf.ol_flags:PKT_RX_SEC_OFFLOAD``,
+ ``mbuf.ol_flags:PKT_TX_SEC_OFFLOAD``, ``mbuf.ol_flags:PKT_RX_SEC_OFFLOAD_FAILED``.
+
+
.. _nic_features_crc_offload:
CRC offload
@@ -640,15 +669,6 @@ Supports packet type parsing and returns a list of supported types.
.. _nic_features_timesync:
-Mbuf fast free
---------------
-
-Supports optimization for fast release of mbufs following successful Tx.
-Requires that per queue, all mbufs come from the same mempool and has refcnt = 1.
-
-* **[uses] rte_eth_txconf,rte_eth_txmode**: ``offloads:DEV_TX_OFFLOAD_MBUF_FAST_FREE``.
-* **[provides] rte_eth_dev_info**: ``tx_offload_capa,tx_queue_offload_capa:DEV_TX_OFFLOAD_MBUF_FAST_FREE``.
-
Timesync
--------
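
The two feature entries added above ("Fast mbuf free" and "Inline crypto") rely on the per-port/per-queue offload flags introduced in this release. As a rough illustration of the Tx side (the port id and queue counts are placeholders, and how individual PMDs combine this with the legacy ``txq_flags`` path during the transition may vary), an application could request the fast-free offload at configure time like this:

.. code-block:: c

   #include <rte_ethdev.h>

   static int
   configure_port_with_fast_free(uint16_t port_id)
   {
           struct rte_eth_dev_info dev_info;
           struct rte_eth_conf port_conf = { 0 };

           rte_eth_dev_info_get(port_id, &dev_info);

           /* Request the offload only if the PMD advertises the capability. */
           if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
                   port_conf.txmode.offloads |= DEV_TX_OFFLOAD_MBUF_FAST_FREE;

           /* One Rx and one Tx queue; per-port Tx offloads come from txmode. */
           return rte_eth_dev_configure(port_id, 1, 1, &port_conf);
   }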
diff --git a/doc/guides/nics/features/default.ini b/doc/guides/nics/features/default.ini
index dc527ddf..dae2ad77 100644
--- a/doc/guides/nics/features/default.ini
+++ b/doc/guides/nics/features/default.ini
@@ -14,6 +14,7 @@ Removal event =
Queue status event =
Rx interrupt =
Lock-free Tx queue =
+Fast mbuf free =
Free Tx mbuf on demand =
Queue start/stop =
MTU update =
@@ -43,6 +44,7 @@ Flow control =
Flow API =
Rate limitation =
Traffic mirroring =
+Inline crypto =
CRC offload =
VLAN offload =
QinQ offload =
@@ -76,4 +78,3 @@ x86-64 =
Usage doc =
Design doc =
Perf doc =
-Mbuf fast free =
diff --git a/doc/guides/nics/features/ixgbe.ini b/doc/guides/nics/features/ixgbe.ini
index 9ff5d8f8..1d68ee8e 100644
--- a/doc/guides/nics/features/ixgbe.ini
+++ b/doc/guides/nics/features/ixgbe.ini
@@ -33,6 +33,7 @@ Flow control = Y
Flow API = Y
Rate limitation = Y
Traffic mirroring = Y
+Inline crypto = Y
CRC offload = Y
VLAN offload = Y
QinQ offload = Y
diff --git a/doc/guides/nics/features/ixgbe_vec.ini b/doc/guides/nics/features/ixgbe_vec.ini
index 4d56df4f..28bc0547 100644
--- a/doc/guides/nics/features/ixgbe_vec.ini
+++ b/doc/guides/nics/features/ixgbe_vec.ini
@@ -32,6 +32,7 @@ Flow director = Y
Flow control = Y
Rate limitation = Y
Traffic mirroring = Y
+Inline crypto = Y
Timesync = Y
Rx descriptor status = Y
Tx descriptor status = Y
diff --git a/doc/guides/nics/features/ixgbe_vf.ini b/doc/guides/nics/features/ixgbe_vf.ini
index b63e32ce..0a15500b 100644
--- a/doc/guides/nics/features/ixgbe_vf.ini
+++ b/doc/guides/nics/features/ixgbe_vf.ini
@@ -17,6 +17,7 @@ RSS hash = Y
RSS key update = Y
RSS reta update = Y
VLAN filter = Y
+Inline crypto = Y
CRC offload = Y
VLAN offload = Y
QinQ offload = Y
diff --git a/doc/guides/nics/features/ixgbe_vf_vec.ini b/doc/guides/nics/features/ixgbe_vf_vec.ini
index c994857e..80e7f3bd 100644
--- a/doc/guides/nics/features/ixgbe_vf_vec.ini
+++ b/doc/guides/nics/features/ixgbe_vf_vec.ini
@@ -17,6 +17,7 @@ RSS hash = Y
RSS key update = Y
RSS reta update = Y
VLAN filter = Y
+Inline crypto = Y
Rx descriptor status = Y
Tx descriptor status = Y
Basic stats = Y
diff --git a/doc/guides/nics/ixgbe.rst b/doc/guides/nics/ixgbe.rst
index c687c63f..d477ea05 100644
--- a/doc/guides/nics/ixgbe.rst
+++ b/doc/guides/nics/ixgbe.rst
@@ -239,6 +239,22 @@ There is no RTE API to add a VF's MAC address from the PF. On ixgbe, the
as a workaround.
+Inline crypto processing support
+--------------------------------
+
+Inline IPsec processing is supported for ``RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO``
+mode for ESP packets only:
+
+- ESP authentication only: AES-128-GMAC (128-bit key)
+- ESP encryption and authentication: AES-128-GCM (128-bit key)
+
+The IPsec Security Gateway sample application supports inline IPsec processing
+for the ixgbe PMD.
+
+For more details see the IPsec Security Gateway Sample Application and Security
+library documentation.
+
+
Supported Chipsets and NICs
---------------------------
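
To make the new section above a little more concrete, below is a heavily abridged sketch of how an application might create an inline-crypto ``rte_security`` session on such a port. It is illustrative only: the IPsec SA parameters and crypto transform are omitted, and the structure field names should be verified against ``rte_security.h``. After session creation the application would attach the session to outgoing packets with ``rte_security_set_pkt_metadata()`` and set ``PKT_TX_SEC_OFFLOAD`` in ``mbuf.ol_flags``.

.. code-block:: c

   #include <rte_ethdev.h>
   #include <rte_security.h>

   static struct rte_security_session *
   create_inline_ipsec_session(uint16_t port_id, struct rte_mempool *sess_mp,
                               struct rte_crypto_sym_xform *crypto_xform)
   {
           /* The security context of an inline-crypto capable ethdev port. */
           struct rte_security_ctx *ctx = rte_eth_dev_get_sec_ctx(port_id);
           struct rte_security_session_conf conf = {
                   .action_type = RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO,
                   .protocol = RTE_SECURITY_PROTOCOL_IPSEC,
                   /* .ipsec = { ... SA direction, SPI, mode, etc. ... }, */
                   .crypto_xform = crypto_xform,
           };

           if (ctx == NULL)
                   return NULL;

           return rte_security_session_create(ctx, &conf, sess_mp);
   }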
diff --git a/doc/guides/nics/mlx4.rst b/doc/guides/nics/mlx4.rst
index 5c3fb764..22341b90 100644
--- a/doc/guides/nics/mlx4.rst
+++ b/doc/guides/nics/mlx4.rst
@@ -74,13 +74,6 @@ long as they share the same MAC address.
Compiling librte_pmd_mlx4 causes DPDK to be linked against libibverbs.
-Features
---------
-
-- Multi arch support: x86_64 and POWER8.
-- Link state information is provided.
-- RX interrupts.
-
Configuration
-------------
@@ -107,11 +100,6 @@ These options can be modified in the ``.config`` file.
to abort with harmless debugging messages as a workaround.
Relevant only when CONFIG_RTE_LIBRTE_MLX4_DEBUG is enabled.
-- ``CONFIG_RTE_LIBRTE_MLX4_MAX_INLINE`` (default **0**)
-
- Amount of data to be inlined during TX operations. Improves latency but
- lowers throughput.
-
- ``CONFIG_RTE_LIBRTE_MLX4_TX_MP_CACHE`` (default **8**)
Maximum number of cached memory pools (MPs) per TX queue. Each MP from
@@ -120,15 +108,6 @@ These options can be modified in the ``.config`` file.
This value is always 1 for RX queues since they use a single MP.
-Environment variables
-~~~~~~~~~~~~~~~~~~~~~
-
-- ``MLX4_INLINE_RECV_SIZE``
-
- A nonzero value enables inline receive for packets up to that size. May
- significantly improve performance in some cases but lower it in
- others. Requires careful testing.
-
Run-time configuration
~~~~~~~~~~~~~~~~~~~~~~
@@ -174,7 +153,7 @@ This driver relies on external libraries and kernel drivers for resources
allocations and initialization. The following dependencies are not part of
DPDK and must be installed separately:
-- **libibverbs**
+- **libibverbs** (provided by rdma-core package)
User space verbs framework used by librte_pmd_mlx4. This library provides
a generic interface between the kernel and low-level user space drivers
@@ -184,7 +163,7 @@ DPDK and must be installed separately:
resources allocations) to be managed by the kernel and fast operations to
never leave user space.
-- **libmlx4**
+- **libmlx4** (provided by rdma-core package)
Low-level user space driver library for Mellanox ConnectX-3 devices,
it is automatically loaded by libibverbs.
@@ -192,7 +171,7 @@ DPDK and must be installed separately:
This library basically implements send/receive calls to the hardware
queues.
-- **Kernel modules** (mlnx-ofed-kernel)
+- **Kernel modules**
They provide the kernel-side verbs API and low level device drivers that
manage actual hardware initialization and resources sharing with user
@@ -218,24 +197,27 @@ DPDK and must be installed separately:
Both libraries are BSD and GPL licensed. Linux kernel modules are GPL
licensed.
-Currently supported by DPDK:
+Depending on system constraints and user preferences, either the RDMA core
+library with a recent enough Linux kernel release (recommended) or Mellanox
+OFED, which provides compatibility with older releases, can be installed.
+
+Current RDMA core package and Linux kernel (recommended)
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-- Mellanox OFED **4.1**.
-- Firmware version **2.36.5000** and above.
+- Minimal Linux kernel version: 4.14.
+- Minimal RDMA core version: v15 (see `RDMA core installation documentation`_).
-Getting Mellanox OFED
-~~~~~~~~~~~~~~~~~~~~~
+.. _`RDMA core installation documentation`: https://raw.githubusercontent.com/linux-rdma/rdma-core/master/README.md
-While these libraries and kernel modules are available on OpenFabrics
-Alliance's `website <https://www.openfabrics.org/>`_ and provided by package
-managers on most distributions, this PMD requires Ethernet extensions that
-may not be supported at the moment (this is a work in progress).
+.. _Mellanox_OFED_as_a_fallback:
-`Mellanox OFED
-<http://www.mellanox.com/page/products_dyn?product_family=26&mtag=linux_sw_drivers>`_
-includes the necessary support and should be used in the meantime. For DPDK,
-only libibverbs, libmlx4, mlnx-ofed-kernel packages and firmware updates are
-required from that distribution.
+Mellanox OFED as a fallback
+~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+- `Mellanox OFED`_ version: **4.2**.
+- firmware version: **2.42.5000** and above.
+
+.. _`Mellanox OFED`: http://www.mellanox.com/page/products_dyn?product_family=26&mtag=linux_sw_drivers
.. note::
@@ -243,15 +225,10 @@ required from that distribution.
this DPDK release was developed and tested against is strongly
recommended. Please check the `prerequisites`_.
-Supported NICs
---------------
-
-* Mellanox(R) ConnectX(R)-3 Pro 40G MCX354A-FCC_Ax (2*40G)
-
-Quick Start Guide
------------------
+Installing Mellanox OFED
+^^^^^^^^^^^^^^^^^^^^^^^^
-1. Download latest Mellanox OFED. For more info check the `prerequisites`_.
+1. Download latest Mellanox OFED.
2. Install the required libraries and kernel modules either by installing
only the required set, or by installing the entire Mellanox OFED:
@@ -260,19 +237,19 @@ Quick Start Guide
.. code-block:: console
- ./mlnxofedinstall
+ ./mlnxofedinstall --dpdk --upstream-libs
For SR-IOV hypervisors use:
.. code-block:: console
- ./mlnxofedinstall --enable-sriov -hypervisor
+ ./mlnxofedinstall --dpdk --upstream-libs --enable-sriov --hypervisor
For SR-IOV virtual machine use:
.. code-block:: console
- ./mlnxofedinstall --guest
+ ./mlnxofedinstall --dpdk --upstream-libs --guest
3. Verify the firmware is the correct one:
@@ -286,7 +263,19 @@ Quick Start Guide
connectx_port_config
- Or in the manual way:
+5. Continue with :ref:`section 2 of the Quick Start Guide <QSG_2>`.
+
+Supported NICs
+--------------
+
+* Mellanox(R) ConnectX(R)-3 Pro 40G MCX354A-FCC_Ax (2*40G)
+
+.. _qsg:
+
+Quick Start Guide
+-----------------
+
+1. Set all port links to Ethernet
.. code-block:: console
@@ -294,7 +283,15 @@ Quick Start Guide
echo eth > "/sys/bus/pci/devices/$PCI/mlx4_port0"
echo eth > "/sys/bus/pci/devices/$PCI/mlx4_port1"
-5. In case of bare metal or hypervisor, configure optimized steering mode
+ .. note::
+
+     If using Mellanox OFED, the port link can be set to Ethernet permanently
+     with the connectx_port_config tool it provides. See
+     :ref:`Mellanox_OFED_as_a_fallback`.
+
+.. _QSG_2:
+
+2. In case of bare metal or hypervisor, configure optimized steering mode
by adding the following line to ``/etc/modprobe.d/mlx4_core.conf``:
.. code-block:: console
@@ -306,7 +303,7 @@ Quick Start Guide
If VLAN filtering is used, set log_num_mgm_entry_size=-1.
Performance degradation can occur on this case.
-6. Restart the driver:
+3. Restart the driver:
.. code-block:: console
@@ -318,7 +315,7 @@ Quick Start Guide
service openibd restart
-7. Compile DPDK and you are ready to go. See instructions on
+4. Compile DPDK and you are ready to go. See instructions on
:ref:`Development Kit Build System <Development_Kit_Build_System>`
Performance tuning
@@ -330,10 +327,7 @@ Performance tuning
cat /sys/module/mlx4_core/parameters/log_num_mgm_entry_size
-2. Use environment variable MLX4_INLINE_RECV_SIZE=64 to get maximum
- performance for 64B messages.
-
-3. Use the CPU near local NUMA node to which the PCIe adapter is connected,
+2. Use the CPU near local NUMA node to which the PCIe adapter is connected,
for better performance. For VMs, verify that the right CPU
and NUMA node are pinned according to the above. Run:
@@ -343,19 +337,19 @@ Performance tuning
to identify the NUMA node to which the PCIe adapter is connected.
-4. If more than one adapter is used, and root complex capabilities allow
+3. If more than one adapter is used, and root complex capabilities allow
to put both adapters on the same NUMA node without PCI bandwidth degradation,
it is recommended to locate both adapters on the same NUMA node.
This in order to forward packets from one to the other without
NUMA performance penalty.
-5. Disable pause frames:
+4. Disable pause frames:
.. code-block:: console
ethtool -A <netdev> rx off tx off
-6. Verify IO non-posted prefetch is disabled by default. This can be checked
+5. Verify IO non-posted prefetch is disabled by default. This can be checked
via the BIOS configuration. Please contact you server provider for more
information about the settings.
diff --git a/doc/guides/nics/octeontx.rst b/doc/guides/nics/octeontx.rst
index a6631cd0..90bb9e5d 100644
--- a/doc/guides/nics/octeontx.rst
+++ b/doc/guides/nics/octeontx.rst
@@ -71,34 +71,7 @@ The features supported by the device and not yet supported by this PMD include:
Prerequisites
-------------
-There are three main pre-perquisites for executing OCTEONTX PMD on a OCTEONTX
-compatible board:
-
-1. **OCTEONTX Linux kernel PF driver for Network acceleration HW blocks**
-
- The OCTEONTX Linux kernel drivers (including the required PF driver for the
- all network acceleration blocks) are available on GitHub at
- `octeontx-kmod <https://github.com/caviumnetworks/octeontx-kmod>`_
- along with build, install and dpdk usage instructions.
-
-2. **ARM64 Tool Chain**
-
- For example, the *aarch64* Linaro Toolchain, which can be obtained from
- `here <https://releases.linaro.org/components/toolchain/binaries/4.9-2017.01/aarch64-linux-gnu>`_.
-
-3. **Rootfile system**
-
- Any *aarch64* supporting filesystem can be used. For example,
- Ubuntu 15.10 (Wily) or 16.04 LTS (Xenial) userland which can be obtained
- from `<http://cdimage.ubuntu.com/ubuntu-base/releases/16.04/release/ubuntu-base-16.04.1-base-arm64.tar.gz>`_.
-
- As an alternative method, OCTEONTX PMD can also be executed using images provided
- as part of SDK from Cavium. The SDK includes all the above prerequisites necessary
- to bring up a OCTEONTX board.
-
- SDK and related information can be obtained from: `Cavium support site <https://support.cavium.com/>`_.
-
-Follow the DPDK :ref:`Getting Started Guide for Linux <linux_gsg>` to setup the basic DPDK environment.
+See :doc:`../platform/octeontx` for setup information.
Pre-Installation Configuration
------------------------------
@@ -222,7 +195,7 @@ Example:
.. code-block:: console
- ./your_dpdk_application --mbuf-pool-ops="octeontx_fpavf" \
+ ./your_dpdk_application --mbuf-pool-ops-name="octeontx_fpavf" \
--vdev='event_octeontx' \
--vdev="eth_octeontx,nr_port=2"
diff --git a/doc/guides/nics/qede.rst b/doc/guides/nics/qede.rst
index 09a10be1..84becc98 100644
--- a/doc/guides/nics/qede.rst
+++ b/doc/guides/nics/qede.rst
@@ -1,5 +1,6 @@
.. BSD LICENSE
Copyright (c) 2016 QLogic Corporation
+ Copyright (c) 2017 Cavium Inc.
All rights reserved.
Redistribution and use in source and binary forms, with or without
@@ -32,8 +33,7 @@ QEDE Poll Mode Driver
======================
The QEDE poll mode driver library (**librte_pmd_qede**) implements support
-for **QLogic FastLinQ QL4xxxx 10G/25G/40G/50G/100G CNA** family of adapters as well
-as their virtual functions (VF) in SR-IOV context. It is supported on
+for the **QLogic FastLinQ QL4xxxx 10G/25G/40G/50G/100G Intelligent Ethernet Adapters (IEA) and Converged Network Adapters (CNA)** family of adapters, as well as their SR-IOV virtual functions (VF). It is supported on
several standard Linux distros like RHEL7.x, SLES12.x and Ubuntu.
It is compile-tested under FreeBSD OS.
@@ -48,21 +48,22 @@ Supported Features
- Allmulti mode
- Port hardware statistics
- Jumbo frames
-- VLAN offload - Filtering and stripping
-- Stateless checksum offloads (IPv4/TCP/UDP)
-- Multiple Rx/Tx queues
-- RSS (with RETA/hash table/key)
-- TSS
- Multiple MAC address
-- Default pause flow control
-- SR-IOV VF
- MTU change
+- Default pause flow control
- Multiprocess aware
- Scatter-Gather
-- VXLAN tunneling offload
-- N-tuple filter and flow director (limited support)
+- Multiple Rx/Tx queues
+- RSS (with RETA/hash table/key)
+- TSS
+- Stateless checksum offloads (IPv4/IPv6/TCP/UDP)
- LRO/TSO
+- VLAN offload - Filtering and stripping
+- N-tuple filter and flow director (limited support)
- NPAR (NIC Partitioning)
+- SR-IOV VF
+- VXLAN tunneling offload
+- MPLSoUDP Tx tunnel offload
Non-supported Features
----------------------
@@ -73,18 +74,30 @@ Non-supported Features
Supported QLogic Adapters
-------------------------
-- QLogic FastLinQ QL4xxxx 10G/25G/40G/50G/100G CNAs.
+- QLogic FastLinQ QL4xxxx 10G/25G/40G/50G/100G Intelligent Ethernet Adapters (IEA) and Converged Network Adapters (CNA)
Prerequisites
-------------
-- Requires firmware version **8.18.x.** and management firmware
- version **8.18.x or higher**. Firmware may be available
+- Requires storm firmware version **8.30.12.0**. Firmware may be available
inbox in certain newer Linux distros under the standard directory
- ``E.g. /lib/firmware/qed/qed_init_values-8.18.9.0.bin``
+ ``E.g. /lib/firmware/qed/qed_init_values-8.30.12.0.bin``
+ If the required firmware files are not available then download it from
+ `QLogic Driver Download Center <http://driverdownloads.qlogic.com/QLogicDriverDownloads_UI/DefaultNewSearch.aspx>`_.
+ For downloading firmware file, select adapter category, model and DPDK Poll Mode Driver.
+
+- Requires management firmware (MFW) version **8.30.x.x** or higher to be
+ flashed on to the adapter. If the required management firmware is not
+ available then download from
+ `QLogic Driver Download Center <http://driverdownloads.qlogic.com/QLogicDriverDownloads_UI/DefaultNewSearch.aspx>`_.
+ For downloading firmware upgrade utility, select adapter category, model and Linux distro.
+ To flash the management firmware refer to the instructions in the QLogic Firmware Upgrade Utility Readme document.
+
+- SR-IOV requires Linux PF driver version **8.20.x.x** or higher.
+ If the required PF driver is not available then download it from
+ `QLogic Driver Download Center <http://driverdownloads.qlogic.com/QLogicDriverDownloads_UI/DefaultNewSearch.aspx>`_.
+ For downloading PF driver, select adapter category, model and Linux distro.
-- If the required firmware files are not available then visit
- `QLogic Driver Download Center <http://driverdownloads.qlogic.com>`_.
Performance note
~~~~~~~~~~~~~~~~
@@ -117,12 +130,18 @@ enabling debugging options may affect system performance.
Toggle display of receive fast path run-time messages.
+- ``CONFIG_RTE_LIBRTE_QEDE_VF_TX_SWITCH`` (default **"y"**)
+
+ A knob to control per-VF Tx switching feature.
+
- ``CONFIG_RTE_LIBRTE_QEDE_FW`` (default **""**)
Gives absolute path of firmware file.
- ``Eg: "/lib/firmware/qed/qed_init_values_zipped-8.18.9.0.bin"``
+ ``Eg: "/lib/firmware/qed/qed_init_values-8.30.12.0.bin"``
Empty string indicates driver will pick up the firmware file
- from the default location.
+ from the default location /lib/firmware/qed.
+  CAUTION: this option is intended mainly for custom firmware and is not
+  recommended for use under normal conditions.
Driver compilation and testing
------------------------------
@@ -135,7 +154,7 @@ SR-IOV: Prerequisites and Sample Application Notes
This section provides instructions to configure SR-IOV with Linux OS.
-**Note**: librte_pmd_qede will be used to bind to SR-IOV VF device and Linux native kernel driver (QEDE) will function as SR-IOV PF driver. Requires PF driver to be 8.10.x.x or higher.
+**Note**: librte_pmd_qede will be used to bind to the SR-IOV VF device, and the Linux native kernel driver (qede) will function as the SR-IOV PF driver. Requires the PF driver to be 8.10.x.x or higher.
#. Verify SR-IOV and ARI capability is enabled on the adapter using ``lspci``:
diff --git a/doc/guides/platform/index.rst b/doc/guides/platform/index.rst
new file mode 100644
index 00000000..69e560cd
--- /dev/null
+++ b/doc/guides/platform/index.rst
@@ -0,0 +1,39 @@
+.. BSD LICENSE
+ Copyright (C) Cavium, Inc. 2017. All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+ * Neither the name of Cavium Inc nor the names of its
+ contributors may be used to endorse or promote products derived
+ from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+Platform Specific Guides
+========================
+
+The following are platform specific guides and setup information.
+
+.. toctree::
+ :maxdepth: 2
+ :numbered:
+
+ octeontx
diff --git a/doc/guides/platform/octeontx.rst b/doc/guides/platform/octeontx.rst
new file mode 100644
index 00000000..fb708caf
--- /dev/null
+++ b/doc/guides/platform/octeontx.rst
@@ -0,0 +1,81 @@
+.. BSD LICENSE
+ Copyright (C) Cavium, Inc. 2017. All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+ * Neither the name of Cavium, Inc nor the names of its
+ contributors may be used to endorse or promote products derived
+ from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+OCTEONTX Board Support Package
+==============================
+
+This document describes the steps required to set up the OCTEONTX platform
+and provides information about the common offload HW block drivers of the
+**Cavium OCTEONTX** SoC family.
+
+
+More information about the SoC can be found at `Cavium, Inc Official Website
+<http://www.cavium.com/OCTEON-TX_ARM_Processors.html>`_.
+
+Common Offload HW Block Drivers
+-------------------------------
+
+1. **Eventdev Driver**
+   See :doc:`../eventdevs/octeontx` for octeontx ssovf eventdev driver
+ information.
+
+2. **Mempool Driver**
+   See :doc:`../mempool/octeontx` for octeontx fpavf mempool driver
+ information.
+
+Steps To Setup Platform
+-----------------------
+
+There are three main prerequisites for setting up Platform drivers on an
+OCTEONTX compatible board:
+
+1. **OCTEONTX Linux kernel PF driver for Network acceleration HW blocks**
+
+   The OCTEONTX Linux kernel drivers (including the required PF driver for the
+ Platform drivers) are available on Github at `octeontx-kmod <https://github.com/caviumnetworks/octeontx-kmod>`_
+ along with build, install and dpdk usage instructions.
+
+2. **ARM64 Tool Chain**
+
+ For example, the *aarch64* Linaro Toolchain, which can be obtained from
+ `here <https://releases.linaro.org/components/toolchain/binaries/4.9-2017.01/aarch64-linux-gnu>`_.
+
+3. **Rootfile system**
+
+ Any *aarch64* supporting filesystem can be used. For example,
+ Ubuntu 15.10 (Wily) or 16.04 LTS (Xenial) userland which can be obtained
+ from `<http://cdimage.ubuntu.com/ubuntu-base/releases/16.04/release/ubuntu-base-16.04.1-base-arm64.tar.gz>`_.
+
+ As an alternative method, Platform drivers can also be executed using images provided
+ as part of SDK from Cavium. The SDK includes all the above prerequisites necessary
+ to bring up a OCTEONTX board.
+
+ SDK and related information can be obtained from: `Cavium support site <https://support.cavium.com/>`_.
+
+- Follow the DPDK :doc:`../linux_gsg/index` to set up the basic DPDK environment.
diff --git a/doc/guides/prog_guide/env_abstraction_layer.rst b/doc/guides/prog_guide/env_abstraction_layer.rst
index 9e834fc5..34d871c9 100644
--- a/doc/guides/prog_guide/env_abstraction_layer.rst
+++ b/doc/guides/prog_guide/env_abstraction_layer.rst
@@ -309,7 +309,7 @@ All these impacts are mentioned in :ref:`known_issue_label` section.
Public Thread API
~~~~~~~~~~~~~~~~~
-There are two public APIs ``rte_thread_set_affinity()`` and ``rte_pthread_get_affinity()`` introduced for threads.
+There are two public APIs ``rte_thread_set_affinity()`` and ``rte_thread_get_affinity()`` introduced for threads.
When they're used in any pthread context, the Thread Local Storage(TLS) will be set/get.
Those TLS include *_cpuset* and *_socket_id*:
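
The corrected name above, ``rte_thread_get_affinity()``, pairs with ``rte_thread_set_affinity()``. A minimal usage sketch for a non-EAL pthread follows (illustrative only; it assumes a Linux build where ``_GNU_SOURCE`` is defined, as in the DPDK build flags, so the ``CPU_*`` macros are available):

.. code-block:: c

   #include <sched.h>
   #include <rte_lcore.h>

   /* Pin the calling thread to CPU 2 and read the affinity back. The EAL
    * call also refreshes the per-thread TLS values (_cpuset, _socket_id)
    * described in this section.
    */
   static int
   pin_thread_to_cpu2(void)
   {
           rte_cpuset_t cpuset;

           CPU_ZERO(&cpuset);
           CPU_SET(2, &cpuset);

           if (rte_thread_set_affinity(&cpuset) != 0)
                   return -1;

           rte_thread_get_affinity(&cpuset);
           return CPU_ISSET(2, &cpuset) ? 0 : -1;
   }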
diff --git a/doc/guides/rel_notes/deprecation.rst b/doc/guides/rel_notes/deprecation.rst
index 7fcebf13..6d9517ef 100644
--- a/doc/guides/rel_notes/deprecation.rst
+++ b/doc/guides/rel_notes/deprecation.rst
@@ -8,14 +8,14 @@ API and ABI deprecation notices are to be posted here.
Deprecation Notices
-------------------
-* eal: several API and ABI changes are planned for ``rte_devargs`` in v17.11.
+* eal: several API and ABI changes are planned for ``rte_devargs`` in v18.02.
The format of device command line parameters will change. The bus will need
to be explicitly stated in the device declaration. The enum ``rte_devtype``
was used to identify a bus and will disappear.
The structure ``rte_devargs`` will change.
The ``rte_devargs_list`` will be made private.
The following functions are deprecated starting from 17.08 and will either be
- modified or removed in 17.11:
+ modified or removed in 18.02:
- ``rte_eal_devargs_add``
- ``rte_eal_devargs_type_count``
@@ -28,12 +28,14 @@ Deprecation Notices
- ``eal_parse_pci_DomBDF`` replaced by ``rte_pci_addr_parse``
- ``rte_eal_compare_pci_addr`` replaced by ``rte_pci_addr_cmp``
-* ethdev: Tx offloads will no longer be enabled by default in 17.11.
- Instead, the ``rte_eth_txmode`` structure will be extended with
- bit field to enable each Tx offload.
- Besides of making the Rx/Tx configuration API more consistent for the
- application, PMDs will be able to provide a better out of the box performance.
- As part of the work, ``ETH_TXQ_FLAGS_NO*`` will be superseded as well.
+* ethdev: a new Tx and Rx offload API was introduced in 17.11.
+  In the new API, offloads are divided into per-port and per-queue offloads.
+  Offloads are disabled by default and enabled per application request.
+  The old offloads API is targeted to be deprecated in 18.05. This includes:
+
+ - removal of ``ETH_TXQ_FLAGS_NO*`` flags.
+ - removal of ``txq_flags`` field from ``rte_eth_txconf`` struct.
+ - removal of the offloads bit-field from ``rte_eth_rxmode`` struct.
* ethdev: the legacy filter API, including
``rte_eth_dev_filter_supported()``, ``rte_eth_dev_filter_ctrl()`` as well
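
The reworded notice above concerns the migration from the bit-field style Rx/Tx configuration to the 17.11 offload flags. A rough before/after sketch of the Rx side follows; it is illustrative only, the offload selection is arbitrary, and the transitional ``ignore_offload_bitfield`` field should be checked against ``rte_ethdev.h``:

.. code-block:: c

   #include <rte_ethdev.h>

   /* Old style, targeted for deprecation: per-port Rx offload bit-fields. */
   static void
   rx_offloads_old_api(struct rte_eth_conf *conf)
   {
           conf->rxmode.hw_ip_checksum = 1;
           conf->rxmode.hw_strip_crc = 1;
   }

   /* New style: the same offloads expressed as DEV_RX_OFFLOAD_* flags. */
   static void
   rx_offloads_new_api(struct rte_eth_conf *conf)
   {
           /* Tell the PMD to use 'offloads' and ignore the old bit-fields. */
           conf->rxmode.ignore_offload_bitfield = 1;
           conf->rxmode.offloads = DEV_RX_OFFLOAD_IPV4_CKSUM |
                                   DEV_RX_OFFLOAD_UDP_CKSUM |
                                   DEV_RX_OFFLOAD_TCP_CKSUM |
                                   DEV_RX_OFFLOAD_CRC_STRIP;
   }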
diff --git a/doc/guides/rel_notes/release_17_11.rst b/doc/guides/rel_notes/release_17_11.rst
index e6e4407c..57966434 100644
--- a/doc/guides/rel_notes/release_17_11.rst
+++ b/doc/guides/rel_notes/release_17_11.rst
@@ -43,40 +43,65 @@ New Features
* **Extended port_id range from uint8_t to uint16_t.**
- Increased port_id range from 8 bits to 16 bits in order to support more than
- 256 ports in dpdk. All ethdev APIs which have port_id as parameter are changed
- in the meantime.
+ Increased the ``port_id`` range from 8 bits to 16 bits in order to support
+ more than 256 ports in DPDK. All ethdev APIs which have ``port_id`` as
+ parameter have been changed.
* **Modified the return type of rte_eth_stats_reset.**
- Changed return type of ``rte_eth_stats_reset`` from ``void`` to ``int``
- so the caller may know whether a device supports the operation or not
+ Changed return type of ``rte_eth_stats_reset`` from ``void`` to ``int`` so
+ that the caller can determine whether a device supports the operation or not
and if the operation was carried out.
* **Added a new driver for Marvell Armada 7k/8k devices.**
- Added the new mrvl net driver for Marvell Armada 7k/8k devices. See the
- "Network Interface Controller Drivers" document for more details on this new
- driver.
+ Added the new ``mrvl`` net driver for Marvell Armada 7k/8k devices. See the
+ :doc:`../nics/mrvl` NIC guide for more details on this new driver.
+
+* **Updated mlx4 driver.**
+
+ Updated the mlx4 driver including the following changes:
+
+ * Isolated mode (rte_flow) can now be enabled anytime, not only during
+ initial device configuration.
+ * Flow rules now support up to 4096 priority levels usable at will by
+ applications.
+ * Enhanced error message to help debugging invalid/unsupported flow rules.
+ * Flow rules matching all multicast and promiscuous traffic are now allowed.
+ * No more software restrictions on flow rules with the RSS action, their
+ configuration is much more flexible.
+ * Significantly reduced memory footprint for Rx and Tx queue objects.
+ * While supported, UDP RSS is temporarily disabled due to a remaining issue
+ with its support in the Linux kernel.
+ * The new RSS implementation does not automatically spread traffic according
+ to the inner packet of VXLAN frames anymore, only the outer one (like
+ other PMDs).
+ * Partial (Tx only) support for secondary processes was broken and had to be
+ removed.
+ * Refactored driver to get rid of dependency on the components provided by
+ Mellanox OFED and instead rely on the current and public rdma-core
+ package and Linux version from now on.
+ * Removed compile-time limitation on number of device instances the PMD
+ can support.
* **Updated mlx5 driver.**
Updated the mlx5 driver including the following changes:
- * Enabled PMD to run on top of upstream linux kernel and rdma-core libs.
- By that removed the dependency on specific Mellanox OFED libraries.
+ * Enabled the PMD to run on top of upstream Linux kernel and rdma-core
+ libs, removing the dependency on specific Mellanox OFED libraries.
* Improved PMD latency performance.
* Improved PMD memory footprint.
- * Supported vectorized Rx/Tx burst for ARMv8.
- * Supported secondary process.
- * Supported flow counters.
- * Supported Rx hardware timestamp offload.
- * Supported device removal event.
+ * Added support for vectorized Rx/Tx burst for ARMv8.
+ * Added support for secondary process.
+ * Added support for flow counters.
+ * Added support for Rx hardware timestamp offload.
+ * Added support for device removal event.
* **Added SoftNIC PMD.**
- Added new SoftNIC PMD. This virtual device offers applications a software
- fallback support for traffic management.
+ Added a new SoftNIC PMD. This virtual device provides applications with
+ software fallback support for traffic management.
* **Added support for NXP DPAA Devices.**
@@ -86,24 +111,23 @@ New Features
* DPAA Mempool driver for supporting offloaded packet memory pool
* DPAA PMD for DPAA devices
- See the "Network Interface Controller Drivers" document for more details of
- this new driver.
+ See the :doc:`../nics/dpaa` document for more details of this new driver.
* **Updated support for Cavium OCTEONTX Device.**
- Updated support for Cavium's OCTEONTX device(CN83xx). This includes:
+ Updated support for Cavium's OCTEONTX device (CN83xx). This includes:
* OCTEONTX Mempool driver for supporting offloaded packet memory pool
* OCTEONTX Ethdev PMD
* OCTEONTX Eventdev-Ethdev Rx adapter
- See the "Network Interface Controller Drivers" document for more details of
- this new driver.
+ See the :doc:`../nics/octeontx` document for more details of this new driver.
-* **nfp: Added PF support.**
+* **Added PF support to the Netronome NFP PMD.**
- Previously Netronome's NFP PMD had just support for VFs. PF support is
- just as a basic DPDK port and has no VF management yet.
+ Added PF support to the Netronome NFP PMD. Previously the NFP PMD only
+  supported VFs. PF support is currently limited to a basic DPDK port and has
+  no VF management yet.
PF support comes with firmware upload support which allows the PMD to
independently work from kernel netdev NFP drivers.
@@ -117,113 +141,115 @@ New Features
* Support for Flow API
* Support for Tx and Rx descriptor status functions
-* **Add bus agnostic functions to cryptodev for PMD initialisation**
+* **Added bus agnostic functions to cryptodev for PMD initialization**
- Adds new PMD assist functions ``rte_cryptodev_pmd_parse_input_args()``,
- ``rte_cryptodev_pmd_create()`` and ``rte_cryptodev_pmd_destroy()`` which
- are bus independent for driver to manage creation and destruction of new
- device instances.
+  Added new bus-independent PMD assist functions
+ ``rte_cryptodev_pmd_parse_input_args()``, ``rte_cryptodev_pmd_create()`` and
+ ``rte_cryptodev_pmd_destroy()`` for drivers to manage creation and
+ destruction of new device instances.
* **Updated QAT crypto PMD.**
- Performance enhancements:
+ Added several performance enhancements:
* Removed atomics from the internal queue pair structure.
- * Coalesce writes to HEAD CSR on response processing.
- * Coalesce writes to TAIL CSR on request processing.
+ * Added coalesce writes to HEAD CSR on response processing.
+ * Added coalesce writes to TAIL CSR on request processing.
- Additional support for:
-
- * AES CCM algorithm.
+ In addition support was added for the AES CCM algorithm.
* **Updated the AESNI MB PMD.**
The AESNI MB PMD has been updated with additional support for:
- * DES CBC algorithm.
- * DES DOCSIS BPI algorithm.
+ * The DES CBC algorithm.
+ * The DES DOCSIS BPI algorithm.
- This requires the IPSec Multi-buffer library 0.47. For more details,
- check out the AESNI MB PMD documenation.
+ This change requires version 0.47 of the IPSec Multi-buffer library. For
+ more details see the :doc:`../cryptodevs/aesni_mb` documentation.
* **Updated the OpenSSL PMD.**
The OpenSSL PMD has been updated with additional support for:
- * DES CBC algorithm.
- * AES CCM algorithm.
+ * The DES CBC algorithm.
+ * The AES CCM algorithm.
* **Added NXP DPAA SEC crypto PMD.**
- A new "dpaa_sec" hardware based crypto PMD for NXP DPAA devices has been
- added. See the "Crypto Device Drivers" document for more details on this
- driver.
+ A new ``dpaa_sec`` hardware based crypto PMD for NXP DPAA devices has been
+ added. See the :doc:`../cryptodevs/dpaa_sec` document for more details.
* **Added MRVL crypto PMD.**
A new crypto PMD has been added, which provides several ciphering and hashing
algorithms. All cryptography operations use the MUSDK library crypto API.
+ See the :doc:`../cryptodevs/mrvl` document for more details.
* **Add new benchmarking mode to dpdk-test-crypto-perf application.**
- Added new "PMD cyclecount" benchmark mode to dpdk-test-crypto-perf application
- that displays more detailed breakdown of CPU cycles used by hardware
+ Added a new "PMD cyclecount" benchmark mode to the ``dpdk-test-crypto-perf``
+ application to display a detailed breakdown of CPU cycles used by hardware
acceleration.
* **Added the Security Offload Library.**
- Added an experimental library - rte_security. It provide security APIs for
- protocols like IPSec using inline ipsec offload to ethernet device or full
- protocol offload with lookaside crypto device.
+  Added an experimental library - ``rte_security``. This provides security APIs
+  for protocols like IPSec, using inline IPsec offload to ethernet devices or
+ full protocol offload with lookaside crypto devices.
- See the "Security_Library" section of the DPDK Programmers Guide document,
- for more information.
+ See the :doc:`../prog_guide/rte_security` section of the DPDK Programmers
+ Guide document for more information.
-* **Updated DPAA2_SEC crypto driver.**
+* **Updated the DPAA2_SEC crypto driver to support rte_security.**
- Updated dpaa2_sec crypto PMD to support rte_security lookaside protocol
- offload for IPSec.
+ Updated the ``dpaa2_sec`` crypto PMD to support ``rte_security`` lookaside
+ protocol offload for IPSec.
-* **Updated IXGBE ethernet driver.**
+* **Updated the IXGBE ethernet driver to support rte_security.**
- Updated ixgbe ethernet PMD to support rte_security inline IPSec offload.
+ Updated the ``ixgbe`` ethernet PMD to support ``rte_security`` inline
+ IPSec offload.
-* **Updated ipsec-secgw application**
+* **Updated ipsec-secgw application to support rte_security.**
- Updated ipsec-secgw sample application to support rte_security actions for
- ipsec inline and full protocol offload using lookaside crypto offload.
+ Updated the ``ipsec-secgw`` sample application to support ``rte_security``
+ actions for IPSec inline offload and for full protocol offload using
+ lookaside crypto devices.
* **Added IOMMU support to libvhost-user**
- Implemented device IOTLB in Vhost-user backend, and enabled Virtio's IOMMU
- feature. The feature is disabled by default, and can be enabled by setting
- RTE_VHOST_USER_IOMMU_SUPPORT flag at vhost device registration time.
+ Implemented device IOTLB in the Vhost-user backend, and enabled Virtio's
+ IOMMU feature. The feature is disabled by default, and can be enabled by
+ setting the ``RTE_VHOST_USER_IOMMU_SUPPORT`` flag at vhost device registration
+ time.
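
  A minimal sketch of opting in at registration time (the socket path and
  error handling are illustrative only)::

      #include <stdio.h>
      #include <rte_vhost.h>

      /* Enable the IOMMU feature for this vhost-user device. */
      if (rte_vhost_driver_register("/tmp/vhost-user0",
                                    RTE_VHOST_USER_IOMMU_SUPPORT) != 0)
              printf("vhost-user registration failed\n");
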
* **Added the Event Ethernet Adapter Library.**
- Added the Event Ethernet Adapter library. It provices APIs for
- eventdev applications to configure the ethdev to eventdev packet flow.
+ Added the Event Ethernet Adapter library. This library provides APIs for
+ eventdev applications to configure the ethdev for eventdev packet flow.
-* **Updated DPAA2 Event PMD.**
+* **Updated DPAA2 Event PMD for the Event Ethernet Adapter.**
- Added support for eventdev ethernet adapter for DPAA2.
+ Added support for the eventdev ethernet adapter for DPAA2.
* **Added Membership library (rte_member).**
- Added membership library. It provides an API for DPDK applications to insert a
- new member, delete an existing member, or query the existence of a member in a
- given set, or a group of sets. For the case of a group of sets the library
- will return not only whether the element has been inserted before in one of
- the sets but also which set it belongs to.
+ Added a new data structure library called the Membership Library.
The Membership Library is an extension and generalization of a traditional
- filter (for example Bloom Filter) structure that has multiple usages in a wide
- variety of workloads and applications. In general, the Membership Library is a
- data structure that provides a “set-summary” and responds to set-membership
- queries whether a certain member belongs to a set(s).
+ filter (for example Bloom Filter) structure that has multiple usages in a
+ wide variety of workloads and applications. In general, the Membership
+ Library is a data structure that provides a "set-summary" and responds to
+ set-membership queries as to whether a certain member belongs to a set(s).
+
+ The library provides APIs for DPDK applications to insert a new member,
+ delete an existing member, and query the existence of a member in a given
+ set, or a group of sets. For the case of a group of sets the library will
+ return not only whether the element has been inserted in one of the sets but
+ also which set it belongs to.
- See the :ref:`Membership Library <Member_Library>` documentation in
- the Programmers Guide document, for more information.
+ See the :doc:`../prog_guide/member_lib` documentation in the Programmers
+ Guide for more information.
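
  As a rough sketch of a lookup/insert cycle on an existing set-summary
  (``classify()`` and the use of set ``1`` are illustrative; see
  ``rte_member.h`` for the exact semantics)::

      #include <rte_member.h>

      /* 'ss' is a set-summary previously created with rte_member_create(). */
      static int
      classify(const struct rte_member_setsum *ss, const void *key)
      {
              member_set_t set_id;

              if (rte_member_lookup(ss, key, &set_id) == 1)
                      return set_id;          /* key known, report its set */

              rte_member_add(ss, key, 1);     /* otherwise file it in set 1 */
              return -1;
      }
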
* **Added the Generic Segmentation Offload Library.**
@@ -243,9 +269,9 @@ New Features
* **Added the Flow Classification Library.**
- Added the Flow Classification library, it provides an API for DPDK
- applications to classify an input packet by matching it against a set of flow
- rules. It uses the librte_table API to manage the flow rules.
+ Added an experimental Flow Classification library to provide APIs for DPDK
+ applications to classify an input packet by matching it against a set of
+ flow rules. It uses the ``librte_table`` API to manage the flow rules.
Resolved Issues
@@ -268,9 +294,6 @@ Resolved Issues
=========================================================
-EAL
-~~~
-
* **Service core fails to call service callback due to atomic lock**
In a specific configuration of multi-thread unsafe services and service
@@ -279,36 +302,6 @@ EAL
looked like another thread was executing the service callback. The logic for
atomic locking of the services has been fixed and refactored for readability.
-Drivers
-~~~~~~~
-
-
-Libraries
-~~~~~~~~~
-
-
-Examples
-~~~~~~~~
-
-
-Other
-~~~~~
-
-
-Known Issues
-------------
-
-.. This section should contain new known issues in this release. Sample format:
-
- * **Add title in present tense with full stop.**
-
- Add a short 1-2 sentence description of the known issue in the present
- tense. Add information on any known workarounds.
-
- This section is a comment. do not overwrite or remove it.
- Also, make sure to start the actual text at the margin.
- =========================================================
-
API Changes
-----------
@@ -323,43 +316,46 @@ API Changes
Also, make sure to start the actual text at the margin.
=========================================================
-* **Ethdev device name length increased**
+* **Ethdev device name length increased.**
- The size of internal device name is increased to 64 characters
- to allow for storing longer bus specific name.
+ The size of the internal device name has been increased to 64 characters
+ to allow for storing longer bus-specific names.
-* **Ethdev flag RTE_ETH_DEV_DETACHABLE was removed**
+* **Removed the Ethdev RTE_ETH_DEV_DETACHABLE flag.**
- This flag is not necessary anymore, with the new hotplug implementation.
- It is now removed from the ether library. Its semantic is expressed at the bus
- and PMD level.
+ Removed the Ethdev ``RTE_ETH_DEV_DETACHABLE`` flag from the ether library.
+ This flag is no longer needed with the new hotplug implementation; its
+ semantics are now expressed at the bus and PMD level.
* **Service cores API updated for usability**
- The service cores API has been changed, removing pointers from the API
- where possible, instead using integer IDs to identify each service. This
- simplifed application code, aids debugging, and provides better
+ The service cores API has been changed, removing pointers from the API where
+ possible, and instead using integer IDs to identify each service. This
+ simplifies application code, aids debugging, and provides better
encapsulation. A summary of the main changes made is as follows:
* Services identified by ID not by ``rte_service_spec`` pointer
* Reduced API surface by using ``set`` functions instead of enable/disable
* Reworked ``rte_service_register`` to provide the service ID to registrar
- * Rework start and stop APIs into ``rte_service_runstate_set``
- * Added API to set runstate of service implementation to indicate readyness
+ * Reworked start and stop APIs into ``rte_service_runstate_set``
+ * Added API to set runstate of service implementation to indicate readiness
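
  A minimal sketch of the ID based flow (``"my_service"`` and
  ``service_lcore_id`` are placeholders)::

      #include <rte_service.h>

      uint32_t sid;

      /* Services are now referenced by integer ID rather than by pointer. */
      if (rte_service_get_by_name("my_service", &sid) == 0) {
              rte_service_map_lcore_set(sid, service_lcore_id, 1);
              rte_service_runstate_set(sid, 1); /* replaces the old start API */
      }
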
-* **The following changes made in mempool library**
+* **The following changes have been made in the mempool library**
- * Moved ``flags`` datatype from int to unsigned int for ``rte_mempool``.
+ * Changed the ``flags`` datatype from ``int`` to ``unsigned int`` for
+ ``rte_mempool``.
* Removed ``__rte_unused int flag`` param from ``rte_mempool_generic_put``
and ``rte_mempool_generic_get`` API.
* Added ``flags`` param in ``rte_mempool_xmem_size`` and
``rte_mempool_xmem_usage``.
+ * ``rte_mem_phy2mch`` was used in Xen dom0 to obtain the physical address;
+ this API has been removed since Xen dom0 support was removed.
-* ``rte_mem_phy2mch`` was used in Xen dom0 to obtain the physical address;
- remove this API as Xen dom0 support was removed.
+* **Added IOVA aliases related to physical address handling.**
-* **Some data type, structure members and functions related to physical address
- are deprecated and have new alias with IOVA wording.**
+ Some data types, structure members and functions related to physical address
+ handling are deprecated and have new aliases with IOVA wording. For example:
* ``phys_addr_t`` can often be replaced by ``rte_iova_t`` of the same size.
* ``RTE_BAD_PHYS_ADDR`` is often replaced by ``RTE_BAD_IOVA`` of the same value.
@@ -367,10 +363,13 @@ API Changes
* ``rte_mem_virt2phy()`` can often be replaced by ``rte_mem_virt2iova``.
* ``rte_malloc_virt2phy`` is aliased with ``rte_malloc_virt2iova``.
* ``rte_memzone.phys_addr`` is aliased with ``rte_memzone.iova``.
- * ``rte_mempool_objhdr.physaddr`` is aliased with ``rte_mempool_objhdr.iova``.
- * ``rte_mempool_memhdr.phys_addr`` is aliased with ``rte_mempool_memhdr.iova``.
+ * ``rte_mempool_objhdr.physaddr`` is aliased with
+ ``rte_mempool_objhdr.iova``.
+ * ``rte_mempool_memhdr.phys_addr`` is aliased with
+ ``rte_mempool_memhdr.iova``.
* ``rte_mempool_virt2phy()`` can be replaced by ``rte_mempool_virt2iova()``.
- * ``rte_mempool_populate_phys*()`` are aliased with ``rte_mempool_populate_iova*()``
+ * ``rte_mempool_populate_phys*()`` are aliased with
+ ``rte_mempool_populate_iova*()``
* ``rte_mbuf.buf_physaddr`` is aliased with ``rte_mbuf.buf_iova``.
* ``rte_mbuf_data_dma_addr*()`` are aliased with ``rte_mbuf_data_iova*()``.
* ``rte_pktmbuf_mtophys*`` are aliased with ``rte_pktmbuf_iova*()``.
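
  The rename is largely mechanical, for example (``addr`` is any valid
  virtual address)::

      /* Deprecated physical-address naming ... */
      phys_addr_t pa = rte_mem_virt2phy(addr);

      /* ... and its IOVA-named equivalent. */
      rte_iova_t iova = rte_mem_virt2iova(addr);
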
@@ -380,26 +379,26 @@ API Changes
The PCI bus previously implemented within the EAL has been moved.
A first part has been added as an RTE library providing PCI helpers to
parse device locations or other such utilities.
- A second part consisting in the actual bus driver has been moved to its
+ A second part consisting of the actual bus driver has been moved to its
proper subdirectory, without changing its functionalities.
- As such, several PCI-related functions are not proposed by the EAL anymore:
-
- * rte_pci_detach
- * rte_pci_dump
- * rte_pci_ioport_map
- * rte_pci_ioport_read
- * rte_pci_ioport_unmap
- * rte_pci_ioport_write
- * rte_pci_map_device
- * rte_pci_probe
- * rte_pci_probe_one
- * rte_pci_read_config
- * rte_pci_register
- * rte_pci_scan
- * rte_pci_unmap_device
- * rte_pci_unregister
- * rte_pci_write_config
+ As such, several PCI-related functions are not exposed by the EAL anymore:
+
+ * ``rte_pci_detach``
+ * ``rte_pci_dump``
+ * ``rte_pci_ioport_map``
+ * ``rte_pci_ioport_read``
+ * ``rte_pci_ioport_unmap``
+ * ``rte_pci_ioport_write``
+ * ``rte_pci_map_device``
+ * ``rte_pci_probe``
+ * ``rte_pci_probe_one``
+ * ``rte_pci_read_config``
+ * ``rte_pci_register``
+ * ``rte_pci_scan``
+ * ``rte_pci_unmap_device``
+ * ``rte_pci_unregister``
+ * ``rte_pci_write_config``
These functions are made available either as part of ``librte_pci`` or
``librte_bus_pci``.
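
  Code that used these helpers now includes the bus driver header instead of
  the EAL one; a small sketch (``pci_dev`` is an already probed device)::

      #include <rte_bus_pci.h>

      uint16_t vendor_id;

      /* Read the vendor ID (offset 0) from the device's PCI config space. */
      rte_pci_read_config(pci_dev, &vendor_id, sizeof(vendor_id), 0);
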
@@ -407,6 +406,7 @@ API Changes
* **Moved vdev bus APIs outside of the EAL**
Moved the following APIs from ``librte_eal`` to ``librte_bus_vdev``:
+
* ``rte_vdev_init``
* ``rte_vdev_register``
* ``rte_vdev_uninit``
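
  A minimal sketch of creating a virtual device through the relocated API
  (the null PMD is used only as an example)::

      #include <rte_bus_vdev.h>

      /* Create a null PMD instance; the argument string may be NULL. */
      rte_vdev_init("net_null0", NULL);
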
@@ -415,13 +415,14 @@ API Changes
* **Add return value to stats_get dev op API**
The ``stats_get`` dev op API return value has been changed to be int.
- By this way PMDs can return an error value in case of failure at stats
+ In this way PMDs can return an error value if a failure occurs while
collecting the statistics.
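
  A PMD can now propagate errors, along the lines of this sketch
  (``query_hw_counters()`` is a hypothetical, driver-specific helper)::

      #include <errno.h>
      #include <rte_ethdev.h>

      static int
      my_pmd_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
      {
              if (query_hw_counters(dev, stats) != 0)
                      return -EIO;   /* failure is now visible to the caller */
              return 0;
      }
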
-* **Modified the rte_cryptodev_allocate_driver function in the cryptodev library.**
+* **Modified the rte_cryptodev_allocate_driver function.**
- The function ``rte_cryptodev_allocate_driver()`` has been modified.
- An extra parameter ``struct cryptodev_driver *crypto_drv`` has been added.
+ Modified the ``rte_cryptodev_allocate_driver()`` function in the cryptodev
+ library. An extra parameter ``struct cryptodev_driver *crypto_drv`` has been
+ added.
* **Removed virtual device bus specific functions from librte_cryptodev.**
@@ -430,8 +431,9 @@ API Changes
and have been replaced by non bus specific functions
``rte_cryptodev_pmd_parse_input_args()`` and ``rte_cryptodev_pmd_create()``.
-* ``rte_cryptodev_create_vdev`` was removed to avoid the dependency on vdev
- in librte_cryptodev; instead, users can call rte_vdev_init() directly.
+ The ``rte_cryptodev_create_vdev()`` function was removed to avoid the
+ dependency on vdev in librte_cryptodev; instead, users can call
+ ``rte_vdev_init()`` directly.
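
  For example, instead of the removed helper an application can create a
  virtual crypto device directly (device name and arguments are illustrative)::

      rte_vdev_init("crypto_aesni_mb", "socket_id=0");
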
* **Removed PCI device bus specific functions from librte_cryptodev.**
@@ -444,29 +446,30 @@ API Changes
The functions ``rte_set_log_level()``, ``rte_get_log_level()``,
``rte_set_log_type()`` and ``rte_get_log_type()`` have been removed.
+
They are respectively replaced by ``rte_log_set_global_level()``,
``rte_log_get_global_level()``, ``rte_log_set_level()`` and
``rte_log_get_level()``.
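
  Migration is a direct substitution, for instance::

      /* Before (removed) */
      rte_set_log_level(RTE_LOG_DEBUG);

      /* After */
      rte_log_set_global_level(RTE_LOG_DEBUG);
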
-* **Removed ``mbuf`` flags ``PKT_RX_VLAN_PKT`` and ``PKT_RX_QINQ_PKT``.**
+* **Removed mbuf flags PKT_RX_VLAN_PKT and PKT_RX_QINQ_PKT.**
The ``mbuf`` flags ``PKT_RX_VLAN_PKT`` and ``PKT_RX_QINQ_PKT`` have
- been removed since their behavior were not properly described.
+ been removed since their behavior was not properly described.
-* **Added ``mbuf`` flags ``PKT_RX_VLAN`` and ``PKT_RX_QINQ``.**
+* **Added mbuf flags PKT_RX_VLAN and PKT_RX_QINQ.**
Two ``mbuf`` flags have been added to indicate that the VLAN
identifier has been saved in the ``mbuf`` structure. For instance:
- - if VLAN is not stripped and TCI is saved: ``PKT_RX_VLAN``
- - if VLAN is stripped and TCI is saved: ``PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED``
+ - If VLAN is not stripped and TCI is saved: ``PKT_RX_VLAN``
+ - If VLAN is stripped and TCI is saved: ``PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED``
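
  An application checking the new flags might do something like the following
  sketch (``process_vlan()`` is a hypothetical helper)::

      if (m->ol_flags & PKT_RX_VLAN) {
              /* The TCI saved by the PMD is available in m->vlan_tci. */
              process_vlan(m->vlan_tci);

              if (m->ol_flags & PKT_RX_VLAN_STRIPPED) {
                      /* The VLAN header was removed from the packet data. */
              }
      }
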
* **Modified the vlan_offload_set_t function prototype in the ethdev library.**
- Changed the function prototype of ``vlan_offload_set_t``. The return value
- has been changed from ``void`` to ``int`` so the caller to knows whether
- the backing device supports the operation or if the operation was
- successfully performed.
+ Modified the ``vlan_offload_set_t`` function prototype in the ethdev
+ library. The return value has been changed from ``void`` to ``int`` so the
+ caller can determine whether the backing device supports the operation or if
+ the operation was successfully performed.
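
  A driver callback following the new prototype might look like this sketch::

      static int
      my_pmd_vlan_offload_set(struct rte_eth_dev *dev, int mask)
      {
              if (mask & ETH_VLAN_STRIP_MASK) {
                      /* program the hardware of 'dev' accordingly ... */
              }
              return 0; /* or a negative value if the device cannot comply */
      }
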
ABI Changes
@@ -485,17 +488,17 @@ ABI Changes
* **Extended port_id range.**
The size of the field ``port_id`` in the ``rte_eth_dev_data`` structure
- changed, as described in the `New Features` section.
+ has changed, as described in the `New Features` section above.
* **New parameter added to rte_eth_dev.**
- New parameter ``security_ctx`` added to ``rte_eth_dev`` to support security
- operations like IPSec inline.
+ A new parameter ``security_ctx`` has been added to ``rte_eth_dev`` to
+ support security operations like IPSec inline.
* **New parameter added to rte_cryptodev.**
- New parameter ``security_ctx`` added to ``rte_cryptodev`` to support security
- operations like lookaside crypto.
+ A new parameter ``security_ctx`` has been added to ``rte_cryptodev`` to
+ support security operations like lookaside crypto.
Removed Items
@@ -510,10 +513,10 @@ Removed Items
Also, make sure to start the actual text at the margin.
=========================================================
-* Xen dom0 in EAL was removed, as well as xenvirt PMD and vhost_xen.
+* Xen dom0 in EAL has been removed, as well as the xenvirt PMD and vhost_xen.
* The crypto performance unit tests have been removed,
- replaced by the dpdk-test-crypto-perf application.
+ replaced by the ``dpdk-test-crypto-perf`` application.
Shared Library Versions
@@ -598,3 +601,219 @@ Tested Platforms
This section is a comment. do not overwrite or remove it.
Also, make sure to start the actual text at the margin.
=========================================================
+
+* Intel(R) platforms with Intel(R) NICs combinations
+
+ * CPU
+
+ * Intel(R) Atom(TM) CPU C2758 @ 2.40GHz
+ * Intel(R) Xeon(R) CPU D-1540 @ 2.00GHz
+ * Intel(R) Xeon(R) CPU D-1541 @ 2.10GHz
+ * Intel(R) Xeon(R) CPU E5-4667 v3 @ 2.00GHz
+ * Intel(R) Xeon(R) CPU E5-2680 v2 @ 2.80GHz
+ * Intel(R) Xeon(R) CPU E5-2699 v3 @ 2.30GHz
+ * Intel(R) Xeon(R) CPU E5-2695 v4 @ 2.10GHz
+ * Intel(R) Xeon(R) CPU E5-2658 v2 @ 2.40GHz
+ * Intel(R) Xeon(R) CPU E5-2658 v3 @ 2.20GHz
+
+ * OS:
+
+ * CentOS 7.2
+ * Fedora 25
+ * Fedora 26
+ * FreeBSD 11
+ * Red Hat Enterprise Linux Server release 7.3
+ * SUSE Enterprise Linux 12
+ * Wind River Linux 8
+ * Ubuntu 16.04
+ * Ubuntu 16.10
+
+ * NICs:
+
+ * Intel(R) 82599ES 10 Gigabit Ethernet Controller
+
+ * Firmware version: 0x61bf0001
+ * Device id (pf/vf): 8086:10fb / 8086:10ed
+ * Driver version: 5.2.3 (ixgbe)
+
+ * Intel(R) Corporation Ethernet Connection X552/X557-AT 10GBASE-T
+
+ * Firmware version: 0x800003e7
+ * Device id (pf/vf): 8086:15ad / 8086:15a8
+ * Driver version: 4.4.6 (ixgbe)
+
+ * Intel(R) Ethernet Converged Network Adapter X710-DA4 (4x10G)
+
+ * Firmware version: 6.01 0x80003205
+ * Device id (pf/vf): 8086:1572 / 8086:154c
+ * Driver version: 2.1.26 (i40e)
+
+ * Intel(R) Ethernet Converged Network Adapter X710-DA2 (2x10G)
+
+ * Firmware version: 6.01 0x80003204
+ * Device id (pf/vf): 8086:1572 / 8086:154c
+ * Driver version: 2.1.26 (i40e)
+
+ * Intel(R) Ethernet Converged Network Adapter XXV710-DA2 (2x25G)
+
+ * Firmware version: 6.01 0x80003221
+ * Device id (pf/vf): 8086:158b
+ * Driver version: 2.1.26 (i40e)
+
+ * Intel(R) Ethernet Converged Network Adapter XL710-QDA2 (2X40G)
+
+ * Firmware version: 6.01 0x8000321c
+ * Device id (pf/vf): 8086:1583 / 8086:154c
+ * Driver version: 2.1.26 (i40e)
+
+ * Intel(R) Corporation I350 Gigabit Network Connection
+
+ * Firmware version: 1.63, 0x80000dda
+ * Device id (pf/vf): 8086:1521 / 8086:1520
+ * Driver version: 5.3.0-k (igb)
+
+* Intel(R) platforms with Mellanox(R) NICs combinations
+
+ * Platform details:
+
+ * Intel(R) Xeon(R) CPU E5-2697A v4 @ 2.60GHz
+ * Intel(R) Xeon(R) CPU E5-2697 v3 @ 2.60GHz
+ * Intel(R) Xeon(R) CPU E5-2680 v2 @ 2.80GHz
+ * Intel(R) Xeon(R) CPU E5-2650 v4 @ 2.20GHz
+ * Intel(R) Xeon(R) CPU E5-2640 @ 2.50GHz
+ * Intel(R) Xeon(R) CPU E5-2620 v4 @ 2.10GHz
+
+ * OS:
+
+ * Red Hat Enterprise Linux Server release 7.3 (Maipo)
+ * Red Hat Enterprise Linux Server release 7.2 (Maipo)
+ * Ubuntu 16.10
+ * Ubuntu 16.04
+ * Ubuntu 14.04
+
+ * MLNX_OFED: 4.2-1.0.0.0
+
+ * NICs:
+
+ * Mellanox(R) ConnectX(R)-3 Pro 40G MCX354A-FCC_Ax (2x40G)
+
+ * Host interface: PCI Express 3.0 x8
+ * Device ID: 15b3:1007
+ * Firmware version: 2.42.5000
+
+ * Mellanox(R) ConnectX(R)-4 10G MCX4111A-XCAT (1x10G)
+
+ * Host interface: PCI Express 3.0 x8
+ * Device ID: 15b3:1013
+ * Firmware version: 12.21.1000
+
+ * Mellanox(R) ConnectX(R)-4 10G MCX4121A-XCAT (2x10G)
+
+ * Host interface: PCI Express 3.0 x8
+ * Device ID: 15b3:1013
+ * Firmware version: 12.21.1000
+
+ * Mellanox(R) ConnectX(R)-4 25G MCX4111A-ACAT (1x25G)
+
+ * Host interface: PCI Express 3.0 x8
+ * Device ID: 15b3:1013
+ * Firmware version: 12.21.1000
+
+ * Mellanox(R) ConnectX(R)-4 25G MCX4121A-ACAT (2x25G)
+
+ * Host interface: PCI Express 3.0 x8
+ * Device ID: 15b3:1013
+ * Firmware version: 12.21.1000
+
+ * Mellanox(R) ConnectX(R)-4 40G MCX4131A-BCAT/MCX413A-BCAT (1x40G)
+
+ * Host interface: PCI Express 3.0 x8
+ * Device ID: 15b3:1013
+ * Firmware version: 12.21.1000
+
+ * Mellanox(R) ConnectX(R)-4 40G MCX415A-BCAT (1x40G)
+
+ * Host interface: PCI Express 3.0 x16
+ * Device ID: 15b3:1013
+ * Firmware version: 12.21.1000
+
+ * Mellanox(R) ConnectX(R)-4 50G MCX4131A-GCAT/MCX413A-GCAT (1x50G)
+
+ * Host interface: PCI Express 3.0 x8
+ * Device ID: 15b3:1013
+ * Firmware version: 12.21.1000
+
+ * Mellanox(R) ConnectX(R)-4 50G MCX414A-BCAT (2x50G)
+
+ * Host interface: PCI Express 3.0 x8
+ * Device ID: 15b3:1013
+ * Firmware version: 12.21.1000
+
+ * Mellanox(R) ConnectX(R)-4 50G MCX415A-GCAT/MCX416A-BCAT/MCX416A-GCAT
+ (2x50G)
+
+ * Host interface: PCI Express 3.0 x16
+ * Device ID: 15b3:1013
+ * Firmware version: 12.21.1000
+
+ * Mellanox(R) ConnectX(R)-4 50G MCX415A-CCAT (1x100G)
+
+ * Host interface: PCI Express 3.0 x16
+ * Device ID: 15b3:1013
+ * Firmware version: 12.21.1000
+
+ * Mellanox(R) ConnectX(R)-4 100G MCX416A-CCAT (2x100G)
+
+ * Host interface: PCI Express 3.0 x16
+ * Device ID: 15b3:1013
+ * Firmware version: 12.21.1000
+
+ * Mellanox(R) ConnectX(R)-4 Lx 10G MCX4121A-XCAT (2x10G)
+
+ * Host interface: PCI Express 3.0 x8
+ * Device ID: 15b3:1015
+ * Firmware version: 14.21.1000
+
+ * Mellanox(R) ConnectX(R)-4 Lx 25G MCX4121A-ACAT (2x25G)
+
+ * Host interface: PCI Express 3.0 x8
+ * Device ID: 15b3:1015
+ * Firmware version: 14.21.1000
+
+ * Mellanox(R) ConnectX(R)-5 100G MCX556A-ECAT (2x100G)
+
+ * Host interface: PCI Express 3.0 x16
+ * Device ID: 15b3:1017
+ * Firmware version: 16.21.1000
+
+ * Mellanox(R) ConnectX-5 Ex EN 100G MCX516A-CDAT (2x100G)
+
+ * Host interface: PCI Express 4.0 x16
+ * Device ID: 15b3:1019
+ * Firmware version: 16.21.1000
+
+* ARM platforms with Mellanox(R) NICs combinations
+
+ * Platform details:
+
+ * Qualcomm ARM 1.1 2500MHz
+
+ * OS:
+
+ * Ubuntu 16.04
+
+ * MLNX_OFED: 4.2-1.0.0.0
+
+ * NICs:
+
+ * Mellanox(R) ConnectX(R)-4 Lx 25G MCX4121A-ACAT (2x25G)
+
+ * Host interface: PCI Express 3.0 x8
+ * Device ID: 15b3:1015
+ * Firmware version: 14.21.1000
+
+ * Mellanox(R) ConnectX(R)-5 100G MCX556A-ECAT (2x100G)
+
+ * Host interface: PCI Express 3.0 x16
+ * Device ID: 15b3:1017
+ * Firmware version: 16.21.1000
diff --git a/doc/guides/sample_app_ug/ip_pipeline.rst b/doc/guides/sample_app_ug/ip_pipeline.rst
index 693d813c..e0aa1484 100644
--- a/doc/guides/sample_app_ug/ip_pipeline.rst
+++ b/doc/guides/sample_app_ug/ip_pipeline.rst
@@ -233,7 +233,7 @@ The application startup arguments are:
* Default: Not present
* Argument: Path to the CLI script file to be run by the master pipeline at application startup.
- No CLI script file will be run at startup of this argument is not present.
+ No CLI script file will be run at startup if this argument is not present.
``-p PORT_MASK``
diff --git a/doc/guides/sample_app_ug/ipv4_multicast.rst b/doc/guides/sample_app_ug/ipv4_multicast.rst
index fd1af006..7a8e7ebc 100644
--- a/doc/guides/sample_app_ug/ipv4_multicast.rst
+++ b/doc/guides/sample_app_ug/ipv4_multicast.rst
@@ -339,7 +339,7 @@ It is the mcast_out_pkt() function that performs the packet duplication (either
/* update header's fields */
hdr->pkt.pkt_len = (uint16_t)(hdr->pkt.data_len + pkt->pkt.pkt_len);
- hdr->pkt.nb_segs = (uint8_t)(pkt->pkt.nb_segs + 1);
+ hdr->pkt.nb_segs = pkt->pkt.nb_segs + 1;
/* copy metadata from source packet */
diff --git a/drivers/bus/pci/bsd/pci.c b/drivers/bus/pci/bsd/pci.c
index facc4b12..b8e21784 100644
--- a/drivers/bus/pci/bsd/pci.c
+++ b/drivers/bus/pci/bsd/pci.c
@@ -75,7 +75,7 @@
/**
* @file
- * PCI probing under linux
+ * PCI probing under BSD
*
* This code is used to simulate a PCI probe by parsing information in
* sysfs. Moreover, when a registered driver matches a device, the
diff --git a/drivers/net/ark/ark_ethdev_rx.c b/drivers/net/ark/ark_ethdev_rx.c
index a3c0377c..987d085e 100644
--- a/drivers/net/ark/ark_ethdev_rx.c
+++ b/drivers/net/ark/ark_ethdev_rx.c
@@ -356,7 +356,7 @@ eth_ark_rx_jumbo(struct ark_rx_queue *queue,
uint16_t remaining;
uint16_t data_len;
- uint8_t segments;
+ uint16_t segments;
/* first buf populated by caller */
mbuf_prev = mbuf0;
diff --git a/drivers/net/bnxt/bnxt.h b/drivers/net/bnxt/bnxt.h
index 646fe79e..8ab1c7f8 100644
--- a/drivers/net/bnxt/bnxt.h
+++ b/drivers/net/bnxt/bnxt.h
@@ -164,6 +164,8 @@ struct bnxt_link_info {
uint16_t auto_link_speed;
uint16_t auto_link_speed_mask;
uint32_t preemphasis;
+ uint8_t phy_type;
+ uint8_t media_type;
};
#define BNXT_COS_QUEUE_COUNT 8
diff --git a/drivers/net/bnxt/bnxt_cpr.c b/drivers/net/bnxt/bnxt_cpr.c
index 26b2755e..19c684ca 100644
--- a/drivers/net/bnxt/bnxt_cpr.c
+++ b/drivers/net/bnxt/bnxt_cpr.c
@@ -55,7 +55,7 @@ void bnxt_handle_async_event(struct bnxt *bp,
case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE:
case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CHANGE:
case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE:
- bnxt_link_update_op(bp->eth_dev, 0);
+ bnxt_link_update_op(bp->eth_dev, 1);
break;
default:
RTE_LOG(DEBUG, PMD, "handle_async_event id = 0x%x\n", event_id);
diff --git a/drivers/net/bnxt/bnxt_ethdev.c b/drivers/net/bnxt/bnxt_ethdev.c
index e8c7d0e7..3b6813cb 100644
--- a/drivers/net/bnxt/bnxt_ethdev.c
+++ b/drivers/net/bnxt/bnxt_ethdev.c
@@ -146,6 +146,7 @@ static const struct rte_pci_id bnxt_pci_id_map[] = {
ETH_RSS_NONFRAG_IPV6_UDP)
static int bnxt_vlan_offload_set_op(struct rte_eth_dev *dev, int mask);
+static void bnxt_print_link_info(struct rte_eth_dev *eth_dev);
/***********************/
@@ -370,6 +371,7 @@ static int bnxt_init_chip(struct bnxt *bp)
goto err_out;
}
}
+ bnxt_print_link_info(bp->eth_dev);
return 0;
@@ -533,20 +535,6 @@ static int bnxt_dev_configure_op(struct rte_eth_dev *eth_dev)
return 0;
}
-static inline int
-rte_bnxt_atomic_write_link_status(struct rte_eth_dev *eth_dev,
- struct rte_eth_link *link)
-{
- struct rte_eth_link *dst = &eth_dev->data->dev_link;
- struct rte_eth_link *src = link;
-
- if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
- *(uint64_t *)src) == 0)
- return 1;
-
- return 0;
-}
-
static void bnxt_print_link_info(struct rte_eth_dev *eth_dev)
{
struct rte_eth_link *link = &eth_dev->data->dev_link;
@@ -585,7 +573,7 @@ static int bnxt_dev_start_op(struct rte_eth_dev *eth_dev)
if (rc)
goto error;
- bnxt_link_update_op(eth_dev, 0);
+ bnxt_link_update_op(eth_dev, 1);
if (eth_dev->data->dev_conf.rxmode.hw_vlan_filter)
vlan_mask |= ETH_VLAN_FILTER_MASK;
@@ -607,9 +595,14 @@ error:
static int bnxt_dev_set_link_up_op(struct rte_eth_dev *eth_dev)
{
struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
+ int rc = 0;
- eth_dev->data->dev_link.link_status = 1;
- bnxt_set_hwrm_link_config(bp, true);
+ if (!bp->link_info.link_up)
+ rc = bnxt_set_hwrm_link_config(bp, true);
+ if (!rc)
+ eth_dev->data->dev_link.link_status = 1;
+
+ bnxt_print_link_info(eth_dev);
return 0;
}
@@ -619,6 +612,8 @@ static int bnxt_dev_set_link_down_op(struct rte_eth_dev *eth_dev)
eth_dev->data->dev_link.link_status = 0;
bnxt_set_hwrm_link_config(bp, false);
+ bp->link_info.link_up = 0;
+
return 0;
}
@@ -760,7 +755,8 @@ out:
/* Timed out or success */
if (new.link_status != eth_dev->data->dev_link.link_status ||
new.link_speed != eth_dev->data->dev_link.link_speed) {
- rte_bnxt_atomic_write_link_status(eth_dev, &new);
+ memcpy(&eth_dev->data->dev_link, &new,
+ sizeof(struct rte_eth_link));
bnxt_print_link_info(eth_dev);
}
@@ -1956,25 +1952,29 @@ parse_ntuple_filter(struct bnxt *bp,
}
static struct bnxt_filter_info*
-bnxt_match_ntuple_filter(struct bnxt_vnic_info *vnic,
+bnxt_match_ntuple_filter(struct bnxt *bp,
struct bnxt_filter_info *bfilter)
{
struct bnxt_filter_info *mfilter = NULL;
+ int i;
- STAILQ_FOREACH(mfilter, &vnic->filter, next) {
- if (bfilter->src_ipaddr[0] == mfilter->src_ipaddr[0] &&
- bfilter->src_ipaddr_mask[0] ==
- mfilter->src_ipaddr_mask[0] &&
- bfilter->src_port == mfilter->src_port &&
- bfilter->src_port_mask == mfilter->src_port_mask &&
- bfilter->dst_ipaddr[0] == mfilter->dst_ipaddr[0] &&
- bfilter->dst_ipaddr_mask[0] ==
- mfilter->dst_ipaddr_mask[0] &&
- bfilter->dst_port == mfilter->dst_port &&
- bfilter->dst_port_mask == mfilter->dst_port_mask &&
- bfilter->flags == mfilter->flags &&
- bfilter->enables == mfilter->enables)
- return mfilter;
+ for (i = bp->nr_vnics - 1; i >= 0; i--) {
+ struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
+ STAILQ_FOREACH(mfilter, &vnic->filter, next) {
+ if (bfilter->src_ipaddr[0] == mfilter->src_ipaddr[0] &&
+ bfilter->src_ipaddr_mask[0] ==
+ mfilter->src_ipaddr_mask[0] &&
+ bfilter->src_port == mfilter->src_port &&
+ bfilter->src_port_mask == mfilter->src_port_mask &&
+ bfilter->dst_ipaddr[0] == mfilter->dst_ipaddr[0] &&
+ bfilter->dst_ipaddr_mask[0] ==
+ mfilter->dst_ipaddr_mask[0] &&
+ bfilter->dst_port == mfilter->dst_port &&
+ bfilter->dst_port_mask == mfilter->dst_port_mask &&
+ bfilter->flags == mfilter->flags &&
+ bfilter->enables == mfilter->enables)
+ return mfilter;
+ }
}
return NULL;
}
@@ -2023,7 +2023,7 @@ bnxt_cfg_ntuple_filter(struct bnxt *bp,
bfilter->ethertype = 0x800;
bfilter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE;
- mfilter = bnxt_match_ntuple_filter(vnic, bfilter);
+ mfilter = bnxt_match_ntuple_filter(bp, bfilter);
if (mfilter != NULL && filter_op == RTE_ETH_FILTER_ADD) {
RTE_LOG(ERR, PMD, "filter exists.");
diff --git a/drivers/net/bnxt/bnxt_hwrm.c b/drivers/net/bnxt/bnxt_hwrm.c
index bf1fb469..d2c800dd 100644
--- a/drivers/net/bnxt/bnxt_hwrm.c
+++ b/drivers/net/bnxt/bnxt_hwrm.c
@@ -550,7 +550,7 @@ int bnxt_hwrm_func_driver_register(struct bnxt *bp)
}
req.async_event_fwd[0] |= rte_cpu_to_le_32(0x1); /* TODO: Use MACRO */
- memset(req.async_event_fwd, 0xff, sizeof(req.async_event_fwd));
+ //memset(req.async_event_fwd, 0xff, sizeof(req.async_event_fwd));
rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
@@ -715,34 +715,38 @@ static int bnxt_hwrm_port_phy_cfg(struct bnxt *bp, struct bnxt_link_info *conf)
struct hwrm_port_phy_cfg_input req = {0};
struct hwrm_port_phy_cfg_output *resp = bp->hwrm_cmd_resp_addr;
uint32_t enables = 0;
- uint32_t link_speed_mask =
- HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_LINK_SPEED_MASK;
HWRM_PREP(req, PORT_PHY_CFG);
if (conf->link_up) {
+ /* Setting Fixed Speed. But AutoNeg is ON, So disable it */
+ if (bp->link_info.auto_mode && conf->link_speed) {
+ req.auto_mode = HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_NONE;
+ RTE_LOG(DEBUG, PMD, "Disabling AutoNeg\n");
+ }
+
req.flags = rte_cpu_to_le_32(conf->phy_flags);
req.force_link_speed = rte_cpu_to_le_16(conf->link_speed);
+ enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_MODE;
/*
* Note, ChiMP FW 20.2.1 and 20.2.2 return an error when we set
* any auto mode, even "none".
*/
if (!conf->link_speed) {
- req.auto_mode = conf->auto_mode;
- enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_MODE;
- if (conf->auto_mode ==
- HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_SPEED_MASK) {
- req.auto_link_speed_mask =
- conf->auto_link_speed_mask;
- enables |= link_speed_mask;
- }
- if (bp->link_info.auto_link_speed) {
- req.auto_link_speed =
- bp->link_info.auto_link_speed;
- enables |=
- HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_LINK_SPEED;
- }
+ /* No speeds specified. Enable AutoNeg - all speeds */
+ req.auto_mode =
+ HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_ALL_SPEEDS;
}
+ /* AutoNeg - Advertise speeds specified. */
+ if (conf->auto_link_speed_mask) {
+ req.auto_mode =
+ HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_SPEED_MASK;
+ req.auto_link_speed_mask =
+ conf->auto_link_speed_mask;
+ enables |=
+ HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_LINK_SPEED_MASK;
+ }
+
req.auto_duplex = conf->duplex;
enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_DUPLEX;
req.auto_pause = conf->auto_pause;
@@ -791,6 +795,8 @@ static int bnxt_hwrm_port_phy_qcfg(struct bnxt *bp,
link_info->auto_pause = resp->auto_pause;
link_info->force_pause = resp->force_pause;
link_info->auto_mode = resp->auto_mode;
+ link_info->phy_type = resp->phy_type;
+ link_info->media_type = resp->media_type;
link_info->support_speeds = rte_le_to_cpu_16(resp->support_speeds);
link_info->auto_link_speed = rte_le_to_cpu_16(resp->auto_link_speed);
@@ -1886,6 +1892,11 @@ static uint16_t bnxt_parse_eth_link_duplex(uint32_t conf_link_speed)
return hw_link_duplex;
}
+static uint16_t bnxt_check_eth_link_autoneg(uint32_t conf_link)
+{
+ return (conf_link & ETH_LINK_SPEED_FIXED) ? 0 : 1;
+}
+
static uint16_t bnxt_parse_eth_link_speed(uint32_t conf_link_speed)
{
uint16_t eth_link_speed = 0;
@@ -2094,7 +2105,7 @@ int bnxt_set_hwrm_link_config(struct bnxt *bp, bool link_up)
int rc = 0;
struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
struct bnxt_link_info link_req;
- uint16_t speed;
+ uint16_t speed, autoneg;
if (BNXT_NPAR_PF(bp) || BNXT_VF(bp))
return 0;
@@ -2109,20 +2120,28 @@ int bnxt_set_hwrm_link_config(struct bnxt *bp, bool link_up)
if (!link_up)
goto port_phy_cfg;
+ autoneg = bnxt_check_eth_link_autoneg(dev_conf->link_speeds);
speed = bnxt_parse_eth_link_speed(dev_conf->link_speeds);
link_req.phy_flags = HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESET_PHY;
- if (speed == 0) {
+ if (autoneg == 1) {
link_req.phy_flags |=
HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESTART_AUTONEG;
- link_req.auto_mode =
- HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_SPEED_MASK;
link_req.auto_link_speed_mask =
bnxt_parse_eth_link_speed_mask(bp,
dev_conf->link_speeds);
} else {
+ if (bp->link_info.phy_type ==
+ HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASET ||
+ bp->link_info.phy_type ==
+ HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASETE ||
+ bp->link_info.media_type ==
+ HWRM_PORT_PHY_QCFG_OUTPUT_MEDIA_TYPE_TP) {
+ RTE_LOG(ERR, PMD, "10GBase-T devices must autoneg\n");
+ return -EINVAL;
+ }
+
link_req.phy_flags |= HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE;
link_req.link_speed = speed;
- RTE_LOG(INFO, PMD, "Set Link Speed %x\n", speed);
}
link_req.duplex = bnxt_parse_eth_link_duplex(dev_conf->link_speeds);
link_req.auto_pause = bp->link_info.auto_pause;
diff --git a/drivers/net/i40e/i40e_ethdev_vf.c b/drivers/net/i40e/i40e_ethdev_vf.c
index 02d9e579..91b5bb03 100644
--- a/drivers/net/i40e/i40e_ethdev_vf.c
+++ b/drivers/net/i40e/i40e_ethdev_vf.c
@@ -650,7 +650,8 @@ i40evf_config_irq_map(struct rte_eth_dev *dev)
uint32_t vector_id;
int i, err;
- if (rte_intr_allow_others(intr_handle))
+ if (dev->data->dev_conf.intr_conf.rxq != 0 &&
+ rte_intr_allow_others(intr_handle))
vector_id = I40E_RX_VEC_START;
else
vector_id = I40E_MISC_VEC_ID;
diff --git a/drivers/net/i40e/i40e_rxtx.c b/drivers/net/i40e/i40e_rxtx.c
index 8b4f612f..ad06b71e 100644
--- a/drivers/net/i40e/i40e_rxtx.c
+++ b/drivers/net/i40e/i40e_rxtx.c
@@ -1473,13 +1473,10 @@ i40e_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts,
m = tx_pkts[i];
ol_flags = m->ol_flags;
- /**
- * m->nb_segs is uint8_t, so nb_segs is always less than
- * I40E_TX_MAX_SEG.
- * We check only a condition for nb_segs > I40E_TX_MAX_MTU_SEG.
- */
+ /* Check for m->nb_segs to not exceed the limits. */
if (!(ol_flags & PKT_TX_TCP_SEG)) {
- if (m->nb_segs > I40E_TX_MAX_MTU_SEG) {
+ if (m->nb_segs > I40E_TX_MAX_SEG ||
+ m->nb_segs > I40E_TX_MAX_MTU_SEG) {
rte_errno = -EINVAL;
return i;
}
diff --git a/drivers/net/ixgbe/ixgbe_rxtx.c b/drivers/net/ixgbe/ixgbe_rxtx.c
index 012d9ee8..9bc84624 100644
--- a/drivers/net/ixgbe/ixgbe_rxtx.c
+++ b/drivers/net/ixgbe/ixgbe_rxtx.c
@@ -2639,7 +2639,7 @@ ixgbe_dev_tx_queue_setup(struct rte_eth_dev *dev,
static void __attribute__((cold))
ixgbe_free_sc_cluster(struct rte_mbuf *m)
{
- uint8_t i, nb_segs = m->nb_segs;
+ uint16_t i, nb_segs = m->nb_segs;
struct rte_mbuf *next_seg;
for (i = 0; i < nb_segs; i++) {
diff --git a/drivers/net/liquidio/base/lio_23xx_vf.c b/drivers/net/liquidio/base/lio_23xx_vf.c
index 99780178..e30c20dc 100644
--- a/drivers/net/liquidio/base/lio_23xx_vf.c
+++ b/drivers/net/liquidio/base/lio_23xx_vf.c
@@ -379,6 +379,25 @@ cn23xx_vf_disable_io_queues(struct lio_device *lio_dev)
cn23xx_vf_reset_io_queues(lio_dev, num_queues);
}
+void
+cn23xx_vf_ask_pf_to_do_flr(struct lio_device *lio_dev)
+{
+ struct lio_mbox_cmd mbox_cmd;
+
+ memset(&mbox_cmd, 0, sizeof(struct lio_mbox_cmd));
+ mbox_cmd.msg.s.type = LIO_MBOX_REQUEST;
+ mbox_cmd.msg.s.resp_needed = 0;
+ mbox_cmd.msg.s.cmd = LIO_VF_FLR_REQUEST;
+ mbox_cmd.msg.s.len = 1;
+ mbox_cmd.q_no = 0;
+ mbox_cmd.recv_len = 0;
+ mbox_cmd.recv_status = 0;
+ mbox_cmd.fn = NULL;
+ mbox_cmd.fn_arg = 0;
+
+ lio_mbox_write(lio_dev, &mbox_cmd);
+}
+
static void
cn23xx_pfvf_hs_callback(struct lio_device *lio_dev,
struct lio_mbox_cmd *cmd, void *arg)
diff --git a/drivers/net/liquidio/base/lio_23xx_vf.h b/drivers/net/liquidio/base/lio_23xx_vf.h
index 83dc053a..ad8db0df 100644
--- a/drivers/net/liquidio/base/lio_23xx_vf.h
+++ b/drivers/net/liquidio/base/lio_23xx_vf.h
@@ -87,6 +87,8 @@ int cn23xx_vf_set_io_queues_off(struct lio_device *lio_dev);
#define CN23XX_VF_BUSY_READING_REG_LOOP_COUNT 100000
+void cn23xx_vf_ask_pf_to_do_flr(struct lio_device *lio_dev);
+
int cn23xx_pfvf_handshake(struct lio_device *lio_dev);
int cn23xx_vf_setup_device(struct lio_device *lio_dev);
diff --git a/drivers/net/liquidio/base/lio_hw_defs.h b/drivers/net/liquidio/base/lio_hw_defs.h
index d4cd23ce..fe5c3bbb 100644
--- a/drivers/net/liquidio/base/lio_hw_defs.h
+++ b/drivers/net/liquidio/base/lio_hw_defs.h
@@ -80,6 +80,9 @@
/* Max IOQs per LIO Link */
#define LIO_MAX_IOQS_PER_IF 64
+/* Wait time in milliseconds for FLR */
+#define LIO_PCI_FLR_WAIT 100
+
enum lio_card_type {
LIO_23XX /* 23xx */
};
diff --git a/drivers/net/liquidio/base/lio_mbox.h b/drivers/net/liquidio/base/lio_mbox.h
index f1c5b8ec..b0875d64 100644
--- a/drivers/net/liquidio/base/lio_mbox.h
+++ b/drivers/net/liquidio/base/lio_mbox.h
@@ -43,6 +43,7 @@
#define LIO_MBOX_DATA_MAX 32
#define LIO_VF_ACTIVE 0x1
+#define LIO_VF_FLR_REQUEST 0x2
#define LIO_CORES_CRASHED 0x3
/* Macro for Read acknowledgment */
diff --git a/drivers/net/liquidio/lio_ethdev.c b/drivers/net/liquidio/lio_ethdev.c
index 4b189661..84b8a328 100644
--- a/drivers/net/liquidio/lio_ethdev.c
+++ b/drivers/net/liquidio/lio_ethdev.c
@@ -1636,6 +1636,11 @@ lio_dev_close(struct rte_eth_dev *eth_dev)
rte_write32(pkt_count, droq->pkts_sent_reg);
}
+ if (lio_dev->pci_dev->kdrv == RTE_KDRV_IGB_UIO) {
+ cn23xx_vf_ask_pf_to_do_flr(lio_dev);
+ rte_delay_ms(LIO_PCI_FLR_WAIT);
+ }
+
/* lio_free_mbox */
lio_dev->fn_list.free_mbox(lio_dev);
@@ -2009,6 +2014,13 @@ lio_first_time_init(struct lio_device *lio_dev,
if (cn23xx_pfvf_handshake(lio_dev))
goto error;
+ /* Request and wait for device reset. */
+ if (pdev->kdrv == RTE_KDRV_IGB_UIO) {
+ cn23xx_vf_ask_pf_to_do_flr(lio_dev);
+ /* FLR wait time doubled as a precaution. */
+ rte_delay_ms(LIO_PCI_FLR_WAIT * 2);
+ }
+
if (cn23xx_vf_set_io_queues_off(lio_dev)) {
lio_dev_err(lio_dev, "Setting io queues off failed\n");
goto error;
diff --git a/drivers/net/mlx4/mlx4_ethdev.c b/drivers/net/mlx4/mlx4_ethdev.c
index c2ea4db1..2f69e7d4 100644
--- a/drivers/net/mlx4/mlx4_ethdev.c
+++ b/drivers/net/mlx4/mlx4_ethdev.c
@@ -1036,12 +1036,27 @@ mlx4_dev_supported_ptypes_get(struct rte_eth_dev *dev)
RTE_PTYPE_L4_FRAG,
RTE_PTYPE_L4_TCP,
RTE_PTYPE_L4_UDP,
+ RTE_PTYPE_UNKNOWN
+ };
+ static const uint32_t ptypes_l2tun[] = {
+ /* refers to rxq_cq_to_pkt_type() */
+ RTE_PTYPE_L2_ETHER,
+ RTE_PTYPE_L3_IPV4_EXT_UNKNOWN,
+ RTE_PTYPE_L3_IPV6_EXT_UNKNOWN,
+ RTE_PTYPE_L4_FRAG,
+ RTE_PTYPE_L4_TCP,
+ RTE_PTYPE_L4_UDP,
RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN,
RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN,
RTE_PTYPE_UNKNOWN
};
+ struct priv *priv = dev->data->dev_private;
- if (dev->rx_pkt_burst == mlx4_rx_burst)
- return ptypes;
+ if (dev->rx_pkt_burst == mlx4_rx_burst) {
+ if (priv->hw_csum_l2tun)
+ return ptypes_l2tun;
+ else
+ return ptypes;
+ }
return NULL;
}
diff --git a/drivers/net/mlx4/mlx4_intr.c b/drivers/net/mlx4/mlx4_intr.c
index b17d109a..50d19769 100644
--- a/drivers/net/mlx4/mlx4_intr.c
+++ b/drivers/net/mlx4/mlx4_intr.c
@@ -98,7 +98,7 @@ mlx4_rx_intr_vec_enable(struct priv *priv)
struct rte_intr_handle *intr_handle = &priv->intr_handle;
mlx4_rx_intr_vec_disable(priv);
- intr_handle->intr_vec = malloc(sizeof(intr_handle->intr_vec[rxqs_n]));
+ intr_handle->intr_vec = malloc(n * sizeof(intr_handle->intr_vec[0]));
if (intr_handle->intr_vec == NULL) {
rte_errno = ENOMEM;
ERROR("failed to allocate memory for interrupt vector,"
diff --git a/drivers/net/mlx4/mlx4_rxq.c b/drivers/net/mlx4/mlx4_rxq.c
index 8b97a894..53313c56 100644
--- a/drivers/net/mlx4/mlx4_rxq.c
+++ b/drivers/net/mlx4/mlx4_rxq.c
@@ -750,6 +750,7 @@ mlx4_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
dev->data->dev_conf.rxmode.hw_ip_checksum),
.csum_l2tun = (priv->hw_csum_l2tun &&
dev->data->dev_conf.rxmode.hw_ip_checksum),
+ .l2tun_offload = priv->hw_csum_l2tun,
.stats = {
.idx = idx,
},
diff --git a/drivers/net/mlx4/mlx4_rxtx.c b/drivers/net/mlx4/mlx4_rxtx.c
index 3985e06d..2bfa8b1b 100644
--- a/drivers/net/mlx4/mlx4_rxtx.c
+++ b/drivers/net/mlx4/mlx4_rxtx.c
@@ -336,6 +336,7 @@ mlx4_txq_complete(struct txq *txq, const unsigned int elts_n,
{
unsigned int elts_comp = txq->elts_comp;
unsigned int elts_tail = txq->elts_tail;
+ unsigned int sq_tail = sq->tail;
struct mlx4_cq *cq = &txq->mcq;
volatile struct mlx4_cqe *cqe;
uint32_t cons_index = cq->cons_index;
@@ -372,13 +373,13 @@ mlx4_txq_complete(struct txq *txq, const unsigned int elts_n,
rte_be_to_cpu_16(cqe->wqe_index) & sq->txbb_cnt_mask;
do {
/* Free next descriptor. */
- nr_txbbs +=
+ sq_tail += nr_txbbs;
+ nr_txbbs =
mlx4_txq_stamp_freed_wqe(sq,
- (sq->tail + nr_txbbs) & sq->txbb_cnt_mask,
- !!((sq->tail + nr_txbbs) & sq->txbb_cnt));
+ sq_tail & sq->txbb_cnt_mask,
+ !!(sq_tail & sq->txbb_cnt));
pkts++;
- } while (((sq->tail + nr_txbbs) & sq->txbb_cnt_mask) !=
- new_index);
+ } while ((sq_tail & sq->txbb_cnt_mask) != new_index);
cons_index++;
} while (1);
if (unlikely(pkts == 0))
@@ -386,7 +387,7 @@ mlx4_txq_complete(struct txq *txq, const unsigned int elts_n,
/* Update CQ. */
cq->cons_index = cons_index;
*cq->set_ci_db = rte_cpu_to_be_32(cq->cons_index & MLX4_CQ_DB_CI_MASK);
- sq->tail = sq->tail + nr_txbbs;
+ sq->tail = sq_tail + nr_txbbs;
/* Update the list of packets posted for transmission. */
elts_comp -= pkts;
assert(elts_comp <= txq->elts_comp);
@@ -751,7 +752,8 @@ mlx4_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
* Packet type for struct rte_mbuf.
*/
static inline uint32_t
-rxq_cq_to_pkt_type(volatile struct mlx4_cqe *cqe)
+rxq_cq_to_pkt_type(volatile struct mlx4_cqe *cqe,
+ uint32_t l2tun_offload)
{
uint8_t idx = 0;
uint32_t pinfo = rte_be_to_cpu_32(cqe->vlan_my_qpn);
@@ -762,7 +764,7 @@ rxq_cq_to_pkt_type(volatile struct mlx4_cqe *cqe)
* bit[7] - MLX4_CQE_L2_TUNNEL
* bit[6] - MLX4_CQE_L2_TUNNEL_IPV4
*/
- if (!(pinfo & MLX4_CQE_L2_VLAN_MASK) && (pinfo & MLX4_CQE_L2_TUNNEL))
+ if (l2tun_offload && (pinfo & MLX4_CQE_L2_TUNNEL))
idx |= ((pinfo & MLX4_CQE_L2_TUNNEL) >> 20) |
((pinfo & MLX4_CQE_L2_TUNNEL_IPV4) >> 19);
/*
@@ -960,7 +962,8 @@ mlx4_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
}
pkt = seg;
/* Update packet information. */
- pkt->packet_type = rxq_cq_to_pkt_type(cqe);
+ pkt->packet_type =
+ rxq_cq_to_pkt_type(cqe, rxq->l2tun_offload);
pkt->ol_flags = 0;
pkt->pkt_len = len;
if (rxq->csum | rxq->csum_l2tun) {
diff --git a/drivers/net/mlx4/mlx4_rxtx.h b/drivers/net/mlx4/mlx4_rxtx.h
index 4acad801..463df2b0 100644
--- a/drivers/net/mlx4/mlx4_rxtx.h
+++ b/drivers/net/mlx4/mlx4_rxtx.h
@@ -80,6 +80,7 @@ struct rxq {
volatile uint32_t *rq_db; /**< RQ doorbell record. */
uint32_t csum:1; /**< Enable checksum offloading. */
uint32_t csum_l2tun:1; /**< Same for L2 tunnels. */
+ uint32_t l2tun_offload:1; /**< L2 tunnel offload is enabled. */
struct mlx4_cq mcq; /**< Info for directly manipulating the CQ. */
struct mlx4_rxq_stats stats; /**< Rx queue counters. */
unsigned int socket; /**< CPU socket ID for allocations. */
diff --git a/drivers/net/mlx5/mlx5_ethdev.c b/drivers/net/mlx5/mlx5_ethdev.c
index c31ea4b6..a3cef689 100644
--- a/drivers/net/mlx5/mlx5_ethdev.c
+++ b/drivers/net/mlx5/mlx5_ethdev.c
@@ -865,39 +865,39 @@ mlx5_link_update_unlocked_gs(struct rte_eth_dev *dev, int wait_to_complete)
sc = ecmd->link_mode_masks[0] |
((uint64_t)ecmd->link_mode_masks[1] << 32);
priv->link_speed_capa = 0;
- if (sc & ETHTOOL_LINK_MODE_Autoneg_BIT)
+ if (sc & MLX5_BITSHIFT(ETHTOOL_LINK_MODE_Autoneg_BIT))
priv->link_speed_capa |= ETH_LINK_SPEED_AUTONEG;
- if (sc & (ETHTOOL_LINK_MODE_1000baseT_Full_BIT |
- ETHTOOL_LINK_MODE_1000baseKX_Full_BIT))
+ if (sc & (MLX5_BITSHIFT(ETHTOOL_LINK_MODE_1000baseT_Full_BIT) |
+ MLX5_BITSHIFT(ETHTOOL_LINK_MODE_1000baseKX_Full_BIT)))
priv->link_speed_capa |= ETH_LINK_SPEED_1G;
- if (sc & (ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT |
- ETHTOOL_LINK_MODE_10000baseKR_Full_BIT |
- ETHTOOL_LINK_MODE_10000baseR_FEC_BIT))
+ if (sc & (MLX5_BITSHIFT(ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT) |
+ MLX5_BITSHIFT(ETHTOOL_LINK_MODE_10000baseKR_Full_BIT) |
+ MLX5_BITSHIFT(ETHTOOL_LINK_MODE_10000baseR_FEC_BIT)))
priv->link_speed_capa |= ETH_LINK_SPEED_10G;
- if (sc & (ETHTOOL_LINK_MODE_20000baseMLD2_Full_BIT |
- ETHTOOL_LINK_MODE_20000baseKR2_Full_BIT))
+ if (sc & (MLX5_BITSHIFT(ETHTOOL_LINK_MODE_20000baseMLD2_Full_BIT) |
+ MLX5_BITSHIFT(ETHTOOL_LINK_MODE_20000baseKR2_Full_BIT)))
priv->link_speed_capa |= ETH_LINK_SPEED_20G;
- if (sc & (ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT |
- ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT |
- ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT |
- ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT))
+ if (sc & (MLX5_BITSHIFT(ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT) |
+ MLX5_BITSHIFT(ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT) |
+ MLX5_BITSHIFT(ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT) |
+ MLX5_BITSHIFT(ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT)))
priv->link_speed_capa |= ETH_LINK_SPEED_40G;
- if (sc & (ETHTOOL_LINK_MODE_56000baseKR4_Full_BIT |
- ETHTOOL_LINK_MODE_56000baseCR4_Full_BIT |
- ETHTOOL_LINK_MODE_56000baseSR4_Full_BIT |
- ETHTOOL_LINK_MODE_56000baseLR4_Full_BIT))
+ if (sc & (MLX5_BITSHIFT(ETHTOOL_LINK_MODE_56000baseKR4_Full_BIT) |
+ MLX5_BITSHIFT(ETHTOOL_LINK_MODE_56000baseCR4_Full_BIT) |
+ MLX5_BITSHIFT(ETHTOOL_LINK_MODE_56000baseSR4_Full_BIT) |
+ MLX5_BITSHIFT(ETHTOOL_LINK_MODE_56000baseLR4_Full_BIT)))
priv->link_speed_capa |= ETH_LINK_SPEED_56G;
- if (sc & (ETHTOOL_LINK_MODE_25000baseCR_Full_BIT |
- ETHTOOL_LINK_MODE_25000baseKR_Full_BIT |
- ETHTOOL_LINK_MODE_25000baseSR_Full_BIT))
+ if (sc & (MLX5_BITSHIFT(ETHTOOL_LINK_MODE_25000baseCR_Full_BIT) |
+ MLX5_BITSHIFT(ETHTOOL_LINK_MODE_25000baseKR_Full_BIT) |
+ MLX5_BITSHIFT(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT)))
priv->link_speed_capa |= ETH_LINK_SPEED_25G;
- if (sc & (ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT |
- ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT))
+ if (sc & (MLX5_BITSHIFT(ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT) |
+ MLX5_BITSHIFT(ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT)))
priv->link_speed_capa |= ETH_LINK_SPEED_50G;
- if (sc & (ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT |
- ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT |
- ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT |
- ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT))
+ if (sc & (MLX5_BITSHIFT(ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT) |
+ MLX5_BITSHIFT(ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT) |
+ MLX5_BITSHIFT(ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT) |
+ MLX5_BITSHIFT(ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT)))
priv->link_speed_capa |= ETH_LINK_SPEED_100G;
dev_link.link_duplex = ((ecmd->duplex == DUPLEX_HALF) ?
ETH_LINK_HALF_DUPLEX : ETH_LINK_FULL_DUPLEX);
diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
index cd99cb07..3df8fba4 100644
--- a/drivers/net/mlx5/mlx5_flow.c
+++ b/drivers/net/mlx5/mlx5_flow.c
@@ -2914,13 +2914,16 @@ priv_fdir_filter_delete(struct priv *priv,
flow_h = flow_spec;
if (memcmp(spec, flow_spec,
RTE_MIN(attr_h->size, flow_h->size)))
- continue;
+ goto wrong_flow;
spec = (void *)((uintptr_t)attr + attr_h->size);
flow_spec = (void *)((uintptr_t)flow_attr +
flow_h->size);
}
/* At this point, the flow match. */
break;
+wrong_flow:
+ /* The flow does not match. */
+ continue;
}
if (flow)
priv_flow_destroy(priv, &priv->flows, flow);
diff --git a/drivers/net/mlx5/mlx5_rxq.c b/drivers/net/mlx5/mlx5_rxq.c
index 6b29aaee..85399eff 100644
--- a/drivers/net/mlx5/mlx5_rxq.c
+++ b/drivers/net/mlx5/mlx5_rxq.c
@@ -331,7 +331,7 @@ priv_rx_intr_vec_enable(struct priv *priv)
if (!priv->dev->data->dev_conf.intr_conf.rxq)
return 0;
priv_rx_intr_vec_disable(priv);
- intr_handle->intr_vec = malloc(sizeof(intr_handle->intr_vec[rxqs_n]));
+ intr_handle->intr_vec = malloc(n * sizeof(intr_handle->intr_vec[0]));
if (intr_handle->intr_vec == NULL) {
ERROR("failed to allocate memory for interrupt vector,"
" Rx interrupts will not be supported");
diff --git a/drivers/net/mlx5/mlx5_utils.h b/drivers/net/mlx5/mlx5_utils.h
index 218ae831..2fbd10b1 100644
--- a/drivers/net/mlx5/mlx5_utils.h
+++ b/drivers/net/mlx5/mlx5_utils.h
@@ -35,6 +35,7 @@
#define RTE_PMD_MLX5_UTILS_H_
#include <stddef.h>
+#include <stdint.h>
#include <stdio.h>
#include <limits.h>
#include <assert.h>
@@ -61,6 +62,9 @@
!!(((bf)[((b) / (sizeof((bf)[0]) * CHAR_BIT))] & \
((size_t)1 << ((b) % (sizeof((bf)[0]) * CHAR_BIT))))))
+/* Convert a bit number to the corresponding 64-bit mask */
+#define MLX5_BITSHIFT(v) (UINT64_C(1) << (v))
+
/* Save and restore errno around argument evaluation. */
#define ERRNO_SAFE(x) ((errno = (int []){ errno, ((x), 0) }[0]))
diff --git a/drivers/net/nfp/nfp_net.c b/drivers/net/nfp/nfp_net.c
index 83dec061..0501156b 100644
--- a/drivers/net/nfp/nfp_net.c
+++ b/drivers/net/nfp/nfp_net.c
@@ -1038,6 +1038,8 @@ nfp_net_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
/* RTE_ETHDEV_QUEUE_STAT_CNTRS default value is 16 */
+ memset(&nfp_dev_stats, 0, sizeof(nfp_dev_stats));
+
/* reading per RX ring stats */
for (i = 0; i < dev->data->nb_rx_queues; i++) {
if (i == RTE_ETHDEV_QUEUE_STAT_CNTRS)
diff --git a/drivers/net/nfp/nfp_nfpu.c b/drivers/net/nfp/nfp_nfpu.c
index 5775d8da..f11afef3 100644
--- a/drivers/net/nfp/nfp_nfpu.c
+++ b/drivers/net/nfp/nfp_nfpu.c
@@ -75,8 +75,13 @@ nfpu_open(struct rte_pci_device *pci_dev, nfpu_desc_t *desc, int nfp)
/* barsz in log2 */
while (barsz >>= 1)
i++;
+
barsz = i;
+ /* Sanity check: we can assume any bar size less than 1MB is an error */
+ if (barsz < 20)
+ return -1;
+
/* Getting address for NFP expansion BAR registers */
cfg_base = pci_dev->mem_resource[0].addr;
cfg_base = (uint8_t *)cfg_base + NFP_CFG_EXP_BAR_CFG_BASE;
diff --git a/drivers/net/nfp/nfp_nspu.c b/drivers/net/nfp/nfp_nspu.c
index 6ba940cb..f9089832 100644
--- a/drivers/net/nfp/nfp_nspu.c
+++ b/drivers/net/nfp/nfp_nspu.c
@@ -341,7 +341,12 @@ nfp_fw_upload(nspu_desc_t *nspu_desc)
return -ENOENT;
}
- fstat(fw_f, &file_stat);
+ if (fstat(fw_f, &file_stat) < 0) {
+ RTE_LOG(INFO, PMD, "Firmware file %s/%s size is unknown",
+ DEFAULT_FW_PATH, DEFAULT_FW_FILENAME);
+ close(fw_f);
+ return -ENOENT;
+ }
fsize = file_stat.st_size;
RTE_LOG(DEBUG, PMD, "Firmware file with size: %" PRIu64 "\n",
@@ -351,12 +356,14 @@ nfp_fw_upload(nspu_desc_t *nspu_desc)
RTE_LOG(INFO, PMD, "fw file too big: %" PRIu64
" bytes (%" PRIu64 " max)",
(uint64_t)fsize, (uint64_t)size);
+ close(fw_f);
return -EINVAL;
}
fw_buf = malloc((size_t)size);
if (!fw_buf) {
RTE_LOG(INFO, PMD, "malloc failed for fw buffer");
+ close(fw_f);
return -ENOMEM;
}
memset(fw_buf, 0, size);
@@ -367,12 +374,14 @@ nfp_fw_upload(nspu_desc_t *nspu_desc)
"Just %" PRIu64 " of %" PRIu64 " bytes read.",
(uint64_t)bytes, (uint64_t)fsize);
free(fw_buf);
+ close(fw_f);
return -EIO;
}
ret = nspu_command(nspu_desc, NSP_CMD_FW_LOAD, 0, 1, fw_buf, 0, bytes);
free(fw_buf);
+ close(fw_f);
return ret;
}
@@ -411,6 +420,9 @@ nfp_nspu_set_bar_from_symbl(nspu_desc_t *desc, const char *symbl,
int ret = 0;
sym_buf = malloc(desc->buf_size);
+ if (!sym_buf)
+ return -ENOMEM;
+
strncpy(sym_buf, symbl, strlen(symbl));
ret = nspu_command(desc, NSP_CMD_GET_SYMBOL, 1, 1, sym_buf,
NFP_SYM_DESC_LEN, strlen(symbl));
@@ -554,6 +566,7 @@ nfp_nsp_eth_config(nspu_desc_t *desc, int port, int up)
NSP_ETH_TABLE_SIZE, 0);
if (ret) {
rte_spinlock_unlock(&desc->nsp_lock);
+ free(entries);
return ret;
}
@@ -574,6 +587,7 @@ nfp_nsp_eth_config(nspu_desc_t *desc, int port, int up)
if (i == NSP_ETH_MAX_COUNT) {
rte_spinlock_unlock(&desc->nsp_lock);
+ free(entries);
return -EINVAL;
}
@@ -598,6 +612,7 @@ nfp_nsp_eth_config(nspu_desc_t *desc, int port, int up)
"Hw ethernet port %d configure failed\n", port);
}
rte_spinlock_unlock(&desc->nsp_lock);
+ free(entries);
return ret;
}
@@ -606,10 +621,14 @@ nfp_nsp_eth_read_table(nspu_desc_t *desc, union eth_table_entry **table)
{
int ret;
+ if (!table)
+ return -EINVAL;
+
RTE_LOG(INFO, PMD, "Reading hw ethernet table...\n");
+
/* port 0 allocates the eth table and read it using NSPU */
*table = malloc(NSP_ETH_TABLE_SIZE);
- if (!table)
+ if (!*table)
return -ENOMEM;
ret = nspu_command(desc, NSP_CMD_READ_ETH_TABLE, 1, 0, *table,
diff --git a/drivers/net/qede/qede_ethdev.c b/drivers/net/qede/qede_ethdev.c
index 88321451..6f5ba2a9 100644
--- a/drivers/net/qede/qede_ethdev.c
+++ b/drivers/net/qede/qede_ethdev.c
@@ -457,6 +457,7 @@ int qede_activate_vport(struct rte_eth_dev *eth_dev, bool flg)
if (IS_VF(edev)) {
params.update_tx_switching_flg = 1;
params.tx_switching_flg = !flg;
+ DP_INFO(edev, "VF tx-switching is disabled\n");
}
#endif
for_each_hwfn(edev, i) {
@@ -469,8 +470,8 @@ int qede_activate_vport(struct rte_eth_dev *eth_dev, bool flg)
break;
}
}
- DP_INFO(edev, "vport %s VF tx-switch %s\n", flg ? "activated" : "deactivated",
- params.tx_switching_flg ? "enabled" : "disabled");
+ DP_INFO(edev, "vport is %s\n", flg ? "activated" : "deactivated");
+
return rc;
}
diff --git a/drivers/net/qede/qede_rxtx.c b/drivers/net/qede/qede_rxtx.c
index 8e8536f8..01a24e54 100644
--- a/drivers/net/qede/qede_rxtx.c
+++ b/drivers/net/qede/qede_rxtx.c
@@ -1547,14 +1547,14 @@ next_cqe:
/* Populate scatter gather buffer descriptor fields */
-static inline uint8_t
+static inline uint16_t
qede_encode_sg_bd(struct qede_tx_queue *p_txq, struct rte_mbuf *m_seg,
struct eth_tx_2nd_bd **bd2, struct eth_tx_3rd_bd **bd3)
{
struct qede_tx_queue *txq = p_txq;
struct eth_tx_bd *tx_bd = NULL;
dma_addr_t mapping;
- uint8_t nb_segs = 0;
+ uint16_t nb_segs = 0;
/* Check for scattered buffers */
while (m_seg) {
diff --git a/drivers/net/softnic/rte_eth_softnic_tm.c b/drivers/net/softnic/rte_eth_softnic_tm.c
index dbb25143..a4599006 100644
--- a/drivers/net/softnic/rte_eth_softnic_tm.c
+++ b/drivers/net/softnic/rte_eth_softnic_tm.c
@@ -585,7 +585,7 @@ static const struct rte_tm_level_capabilities tm_level_cap[] = {
.non_leaf_nodes_identical = 1,
.leaf_nodes_identical = 0,
- .nonleaf = {
+ {.nonleaf = {
.shaper_private_supported = 1,
.shaper_private_dual_rate_supported = 0,
.shaper_private_rate_min = 1,
@@ -599,7 +599,7 @@ static const struct rte_tm_level_capabilities tm_level_cap[] = {
.sched_wfq_weight_max = 1,
.stats_mask = STATS_MASK_DEFAULT,
- },
+ } },
},
[TM_NODE_LEVEL_SUBPORT] = {
@@ -609,7 +609,7 @@ static const struct rte_tm_level_capabilities tm_level_cap[] = {
.non_leaf_nodes_identical = 1,
.leaf_nodes_identical = 0,
- .nonleaf = {
+ {.nonleaf = {
.shaper_private_supported = 1,
.shaper_private_dual_rate_supported = 0,
.shaper_private_rate_min = 1,
@@ -626,7 +626,7 @@ static const struct rte_tm_level_capabilities tm_level_cap[] = {
.sched_wfq_weight_max = 1,
#endif
.stats_mask = STATS_MASK_DEFAULT,
- },
+ } },
},
[TM_NODE_LEVEL_PIPE] = {
@@ -636,7 +636,7 @@ static const struct rte_tm_level_capabilities tm_level_cap[] = {
.non_leaf_nodes_identical = 1,
.leaf_nodes_identical = 0,
- .nonleaf = {
+ {.nonleaf = {
.shaper_private_supported = 1,
.shaper_private_dual_rate_supported = 0,
.shaper_private_rate_min = 1,
@@ -652,7 +652,7 @@ static const struct rte_tm_level_capabilities tm_level_cap[] = {
.sched_wfq_weight_max = 1,
.stats_mask = STATS_MASK_DEFAULT,
- },
+ } },
},
[TM_NODE_LEVEL_TC] = {
@@ -662,7 +662,7 @@ static const struct rte_tm_level_capabilities tm_level_cap[] = {
.non_leaf_nodes_identical = 1,
.leaf_nodes_identical = 0,
- .nonleaf = {
+ {.nonleaf = {
.shaper_private_supported = 1,
.shaper_private_dual_rate_supported = 0,
.shaper_private_rate_min = 1,
@@ -678,7 +678,7 @@ static const struct rte_tm_level_capabilities tm_level_cap[] = {
.sched_wfq_weight_max = UINT32_MAX,
.stats_mask = STATS_MASK_DEFAULT,
- },
+ } },
},
[TM_NODE_LEVEL_QUEUE] = {
@@ -688,7 +688,7 @@ static const struct rte_tm_level_capabilities tm_level_cap[] = {
.non_leaf_nodes_identical = 0,
.leaf_nodes_identical = 1,
- .leaf = {
+ {.leaf = {
.shaper_private_supported = 0,
.shaper_private_dual_rate_supported = 0,
.shaper_private_rate_min = 0,
@@ -700,7 +700,7 @@ static const struct rte_tm_level_capabilities tm_level_cap[] = {
.cman_wred_context_shared_n_max = 0,
.stats_mask = STATS_MASK_QUEUE,
- },
+ } },
},
};
@@ -778,13 +778,13 @@ static const struct rte_tm_node_capabilities tm_node_cap[] = {
.shaper_private_rate_max = UINT32_MAX,
.shaper_shared_n_max = 0,
- .nonleaf = {
+ {.nonleaf = {
.sched_n_children_max = UINT32_MAX,
.sched_sp_n_priorities_max = 1,
.sched_wfq_n_children_per_group_max = UINT32_MAX,
.sched_wfq_n_groups_max = 1,
.sched_wfq_weight_max = 1,
- },
+ } },
.stats_mask = STATS_MASK_DEFAULT,
},
@@ -796,13 +796,13 @@ static const struct rte_tm_node_capabilities tm_node_cap[] = {
.shaper_private_rate_max = UINT32_MAX,
.shaper_shared_n_max = 0,
- .nonleaf = {
+ {.nonleaf = {
.sched_n_children_max = UINT32_MAX,
.sched_sp_n_priorities_max = 1,
.sched_wfq_n_children_per_group_max = UINT32_MAX,
.sched_wfq_n_groups_max = 1,
.sched_wfq_weight_max = UINT32_MAX,
- },
+ } },
.stats_mask = STATS_MASK_DEFAULT,
},
@@ -814,7 +814,7 @@ static const struct rte_tm_node_capabilities tm_node_cap[] = {
.shaper_private_rate_max = UINT32_MAX,
.shaper_shared_n_max = 0,
- .nonleaf = {
+ {.nonleaf = {
.sched_n_children_max =
RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE,
.sched_sp_n_priorities_max =
@@ -822,7 +822,7 @@ static const struct rte_tm_node_capabilities tm_node_cap[] = {
.sched_wfq_n_children_per_group_max = 1,
.sched_wfq_n_groups_max = 0,
.sched_wfq_weight_max = 1,
- },
+ } },
.stats_mask = STATS_MASK_DEFAULT,
},
@@ -834,7 +834,7 @@ static const struct rte_tm_node_capabilities tm_node_cap[] = {
.shaper_private_rate_max = UINT32_MAX,
.shaper_shared_n_max = 1,
- .nonleaf = {
+ {.nonleaf = {
.sched_n_children_max =
RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS,
.sched_sp_n_priorities_max = 1,
@@ -842,7 +842,7 @@ static const struct rte_tm_node_capabilities tm_node_cap[] = {
RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS,
.sched_wfq_n_groups_max = 1,
.sched_wfq_weight_max = UINT32_MAX,
- },
+ } },
.stats_mask = STATS_MASK_DEFAULT,
},
@@ -855,11 +855,11 @@ static const struct rte_tm_node_capabilities tm_node_cap[] = {
.shaper_shared_n_max = 0,
- .leaf = {
+ {.leaf = {
.cman_head_drop_supported = 0,
.cman_wred_context_private_supported = WRED_SUPPORTED,
.cman_wred_context_shared_n_max = 0,
- },
+ } },
.stats_mask = STATS_MASK_QUEUE,
},
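The extra brace level introduced above ({.nonleaf = { ... } } instead of .nonleaf = { ... }) is needed because nonleaf and leaf live inside an unnamed union in the capability structures; several compilers, notably older GCC in strict modes, reject a bare designator for a member of an anonymous union at that position. A minimal sketch of the pattern, assuming a layout merely similar to rte_tm_level_capabilities (field names abbreviated, not the real definition), built with C11 or GNU extensions for the unnamed union:

#include <stdint.h>

/* Sketch only: a structure shaped like the capability tables above,
 * with an unnamed union selecting the non-leaf or leaf variant. */
struct level_cap_sketch {
	uint32_t n_nodes_max;
	union {
		struct {
			uint32_t sched_n_children_max;
		} nonleaf;
		struct {
			uint32_t cman_head_drop_supported;
		} leaf;
	};
};

static const struct level_cap_sketch cap_sketch = {
	.n_nodes_max = 64,
	/* Brace the unnamed union explicitly, then designate its member;
	 * a bare .nonleaf designator here trips up older compilers. */
	{.nonleaf = {
		.sched_n_children_max = 8,
	} },
};

The same brace fix reappears below in examples/ipsec-secgw, where the session configuration likewise keeps its per-protocol settings in an unnamed union.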
diff --git a/drivers/net/szedata2/rte_eth_szedata2.c b/drivers/net/szedata2/rte_eth_szedata2.c
index 403cfdbb..74f151c4 100644
--- a/drivers/net/szedata2/rte_eth_szedata2.c
+++ b/drivers/net/szedata2/rte_eth_szedata2.c
@@ -682,7 +682,7 @@ eth_szedata2_tx(void *queue,
uint32_t hwpkt_len;
uint32_t unlock_size;
uint32_t rem_len;
- uint8_t mbuf_segs;
+ uint16_t mbuf_segs;
uint16_t pkt_left = nb_pkts;
if (sze_q->sze == NULL || nb_pkts == 0)
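The type change above follows the widening of nb_segs in struct rte_mbuf to 16 bits: a local uint8_t copy of the segment count can silently truncate long chains. A one-line sketch of the matching declaration (the mbuf pointer name is illustrative):

uint16_t mbuf_segs = mbuf->nb_segs;	/* keep the local counter as wide as the mbuf field */

The ipv4_multicast hunk further down drops its uint8_t cast on nb_segs for the same reason.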
diff --git a/drivers/net/virtio/virtio_ethdev.c b/drivers/net/virtio/virtio_ethdev.c
index d2576d5e..e0328f61 100644
--- a/drivers/net/virtio/virtio_ethdev.c
+++ b/drivers/net/virtio/virtio_ethdev.c
@@ -97,6 +97,9 @@ static void virtio_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index);
static void virtio_mac_addr_set(struct rte_eth_dev *dev,
struct ether_addr *mac_addr);
+static int virtio_intr_enable(struct rte_eth_dev *dev);
+static int virtio_intr_disable(struct rte_eth_dev *dev);
+
static int virtio_dev_queue_stats_mapping_set(
struct rte_eth_dev *eth_dev,
uint16_t queue_id,
@@ -618,7 +621,7 @@ virtio_dev_close(struct rte_eth_dev *dev)
virtio_queues_unbind_intr(dev);
if (intr_conf->lsc || intr_conf->rxq) {
- rte_intr_disable(dev->intr_handle);
+ virtio_intr_disable(dev);
rte_intr_efd_disable(dev->intr_handle);
rte_free(dev->intr_handle->intr_vec);
dev->intr_handle->intr_vec = NULL;
@@ -1160,6 +1163,34 @@ virtio_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
}
static int
+virtio_intr_enable(struct rte_eth_dev *dev)
+{
+ struct virtio_hw *hw = dev->data->dev_private;
+
+ if (rte_intr_enable(dev->intr_handle) < 0)
+ return -1;
+
+ if (!hw->virtio_user_dev)
+ hw->use_msix = vtpci_msix_detect(RTE_ETH_DEV_TO_PCI(dev));
+
+ return 0;
+}
+
+static int
+virtio_intr_disable(struct rte_eth_dev *dev)
+{
+ struct virtio_hw *hw = dev->data->dev_private;
+
+ if (rte_intr_disable(dev->intr_handle) < 0)
+ return -1;
+
+ if (!hw->virtio_user_dev)
+ hw->use_msix = vtpci_msix_detect(RTE_ETH_DEV_TO_PCI(dev));
+
+ return 0;
+}
+
+static int
virtio_negotiate_features(struct virtio_hw *hw, uint64_t req_features)
{
uint64_t host_features;
@@ -1228,7 +1259,7 @@ virtio_interrupt_handler(void *param)
isr = vtpci_isr(hw);
PMD_DRV_LOG(INFO, "interrupt status = %#x", isr);
- if (rte_intr_enable(dev->intr_handle) < 0)
+ if (virtio_intr_enable(dev) < 0)
PMD_DRV_LOG(ERR, "interrupt enable failed");
if (isr & VIRTIO_PCI_ISR_CONFIG) {
@@ -1348,7 +1379,7 @@ virtio_configure_intr(struct rte_eth_dev *dev)
* to change the config size from 20 to 24, or VIRTIO_MSI_QUEUE_VECTOR
* (22) will be ignored.
*/
- if (rte_intr_enable(dev->intr_handle) < 0) {
+ if (virtio_intr_enable(dev) < 0) {
PMD_DRV_LOG(ERR, "interrupt enable failed");
return -1;
}
@@ -1388,7 +1419,8 @@ virtio_init_device(struct rte_eth_dev *eth_dev, uint64_t req_features)
}
/* If host does not support both status and MSI-X then disable LSC */
- if (vtpci_with_feature(hw, VIRTIO_NET_F_STATUS) && hw->use_msix)
+ if (vtpci_with_feature(hw, VIRTIO_NET_F_STATUS) &&
+ hw->use_msix != VIRTIO_MSIX_NONE)
eth_dev->data->dev_flags |= RTE_ETH_DEV_INTR_LSC;
else
eth_dev->data->dev_flags &= ~RTE_ETH_DEV_INTR_LSC;
@@ -1588,13 +1620,13 @@ eth_virtio_dev_init(struct rte_eth_dev *eth_dev)
if (!hw->virtio_user_dev) {
ret = vtpci_init(RTE_ETH_DEV_TO_PCI(eth_dev), hw);
if (ret)
- return ret;
+ goto out;
}
/* reset device and negotiate default features */
ret = virtio_init_device(eth_dev, VIRTIO_PMD_DEFAULT_GUEST_FEATURES);
if (ret < 0)
- return ret;
+ goto out;
/* Setup interrupt callback */
if (eth_dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC)
@@ -1602,6 +1634,10 @@ eth_virtio_dev_init(struct rte_eth_dev *eth_dev)
virtio_interrupt_handler, eth_dev);
return 0;
+
+out:
+ rte_free(eth_dev->data->mac_addrs);
+ return ret;
}
static int
@@ -1801,9 +1837,9 @@ virtio_dev_start(struct rte_eth_dev *dev)
*/
if (dev->data->dev_conf.intr_conf.lsc ||
dev->data->dev_conf.intr_conf.rxq) {
- rte_intr_disable(dev->intr_handle);
+ virtio_intr_disable(dev);
- if (rte_intr_enable(dev->intr_handle) < 0) {
+ if (virtio_intr_enable(dev) < 0) {
PMD_DRV_LOG(ERR, "interrupt enable failed");
return -EIO;
}
@@ -1912,7 +1948,7 @@ virtio_dev_stop(struct rte_eth_dev *dev)
PMD_INIT_LOG(DEBUG, "stop");
if (intr_conf->lsc || intr_conf->rxq)
- rte_intr_disable(dev->intr_handle);
+ virtio_intr_disable(dev);
hw->started = 0;
memset(&link, 0, sizeof(link));
diff --git a/drivers/net/virtio/virtio_pci.c b/drivers/net/virtio/virtio_pci.c
index 55b717c0..9574498f 100644
--- a/drivers/net/virtio/virtio_pci.c
+++ b/drivers/net/virtio/virtio_pci.c
@@ -57,7 +57,8 @@
* The remaining space is defined by each driver as the per-driver
* configuration space.
*/
-#define VIRTIO_PCI_CONFIG(hw) (((hw)->use_msix) ? 24 : 20)
+#define VIRTIO_PCI_CONFIG(hw) \
+ (((hw)->use_msix == VIRTIO_MSIX_ENABLED) ? 24 : 20)
static inline int
check_vq_phys_addr_ok(struct virtqueue *vq)
@@ -617,7 +618,9 @@ virtio_read_caps(struct rte_pci_device *dev, struct virtio_hw *hw)
uint16_t flags = ((uint16_t *)&cap)[1];
if (flags & PCI_MSIX_ENABLE)
- hw->use_msix = 1;
+ hw->use_msix = VIRTIO_MSIX_ENABLED;
+ else
+ hw->use_msix = VIRTIO_MSIX_DISABLED;
}
if (cap.cap_vndr != PCI_CAP_ID_VNDR) {
@@ -710,3 +713,39 @@ vtpci_init(struct rte_pci_device *dev, struct virtio_hw *hw)
return 0;
}
+
+enum virtio_msix_status
+vtpci_msix_detect(struct rte_pci_device *dev)
+{
+ uint8_t pos;
+ struct virtio_pci_cap cap;
+ int ret;
+
+ ret = rte_pci_read_config(dev, &pos, 1, PCI_CAPABILITY_LIST);
+ if (ret < 0) {
+ PMD_INIT_LOG(DEBUG, "failed to read pci capability list");
+ return VIRTIO_MSIX_NONE;
+ }
+
+ while (pos) {
+ ret = rte_pci_read_config(dev, &cap, sizeof(cap), pos);
+ if (ret < 0) {
+ PMD_INIT_LOG(ERR,
+ "failed to read pci cap at pos: %x", pos);
+ break;
+ }
+
+ if (cap.cap_vndr == PCI_CAP_ID_MSIX) {
+ uint16_t flags = ((uint16_t *)&cap)[1];
+
+ if (flags & PCI_MSIX_ENABLE)
+ return VIRTIO_MSIX_ENABLED;
+ else
+ return VIRTIO_MSIX_DISABLED;
+ }
+
+ pos = cap.cap_next;
+ }
+
+ return VIRTIO_MSIX_NONE;
+}
diff --git a/drivers/net/virtio/virtio_pci.h b/drivers/net/virtio/virtio_pci.h
index 36d452c0..3c5ce66c 100644
--- a/drivers/net/virtio/virtio_pci.h
+++ b/drivers/net/virtio/virtio_pci.h
@@ -314,6 +314,12 @@ struct virtio_net_config {
/* The alignment to use between consumer and producer parts of vring. */
#define VIRTIO_PCI_VRING_ALIGN 4096
+enum virtio_msix_status {
+ VIRTIO_MSIX_NONE = 0,
+ VIRTIO_MSIX_DISABLED = 1,
+ VIRTIO_MSIX_ENABLED = 2
+};
+
static inline int
vtpci_with_feature(struct virtio_hw *hw, uint64_t bit)
{
@@ -339,6 +345,8 @@ void vtpci_read_dev_config(struct virtio_hw *, size_t, void *, int);
uint8_t vtpci_isr(struct virtio_hw *);
+enum virtio_msix_status vtpci_msix_detect(struct rte_pci_device *dev);
+
extern const struct virtio_pci_ops legacy_ops;
extern const struct virtio_pci_ops modern_ops;
extern const struct virtio_pci_ops virtio_user_ops;
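Taken together, the virtio changes replace the old boolean use_msix with the three-state virtio_msix_status above and re-run vtpci_msix_detect() from the new virtio_intr_enable()/virtio_intr_disable() wrappers, because the kernel driver (VFIO in particular) may flip the MSI-X enable bit in PCI config space exactly when interrupts are enabled or disabled. The state matters in two places: the legacy device config area starts at offset 24 only while MSI-X is actually enabled (20 otherwise), and LSC is only advertised when MSI-X is present at all. A hedged sketch of how the three states are consumed (enum values as defined above, real logic simplified):

#include <stdbool.h>

/* Sketch only: how the three MSI-X states are used elsewhere in the PMD. */
static inline unsigned int
dev_config_offset(enum virtio_msix_status msix)
{
	/* The MSI-X vector registers sit between the common header and the
	 * device-specific config, so the config moves from 20 to 24. */
	return (msix == VIRTIO_MSIX_ENABLED) ? 24 : 20;
}

static inline bool
lsc_usable(enum virtio_msix_status msix, bool has_status_feature)
{
	/* Link-state change interrupts need VIRTIO_NET_F_STATUS and MSI-X. */
	return has_status_feature && msix != VIRTIO_MSIX_NONE;
}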
diff --git a/examples/ipsec-secgw/ipsec.c b/examples/ipsec-secgw/ipsec.c
index c24284d6..70ed2272 100644
--- a/examples/ipsec-secgw/ipsec.c
+++ b/examples/ipsec-secgw/ipsec.c
@@ -84,7 +84,7 @@ create_session(struct ipsec_ctx *ipsec_ctx, struct ipsec_sa *sa)
struct rte_security_session_conf sess_conf = {
.action_type = sa->type,
.protocol = RTE_SECURITY_PROTOCOL_IPSEC,
- .ipsec = {
+ {.ipsec = {
.spi = sa->spi,
.salt = sa->salt,
.options = { 0 },
@@ -94,7 +94,7 @@ create_session(struct ipsec_ctx *ipsec_ctx, struct ipsec_sa *sa)
sa->flags == IP6_TUNNEL) ?
RTE_SECURITY_IPSEC_SA_MODE_TUNNEL :
RTE_SECURITY_IPSEC_SA_MODE_TRANSPORT,
- },
+ } },
.crypto_xform = sa->xforms
};
diff --git a/examples/ipv4_multicast/main.c b/examples/ipv4_multicast/main.c
index 83ac0d80..1c585165 100644
--- a/examples/ipv4_multicast/main.c
+++ b/examples/ipv4_multicast/main.c
@@ -289,7 +289,7 @@ mcast_out_pkt(struct rte_mbuf *pkt, int use_clone)
/* update header's fields */
hdr->pkt_len = (uint16_t)(hdr->data_len + pkt->pkt_len);
- hdr->nb_segs = (uint8_t)(pkt->nb_segs + 1);
+ hdr->nb_segs = pkt->nb_segs + 1;
/* copy metadata from source packet*/
hdr->port = pkt->port;
diff --git a/lib/librte_distributor/rte_distributor.c b/lib/librte_distributor/rte_distributor.c
index 57ad3397..6ad23013 100644
--- a/lib/librte_distributor/rte_distributor.c
+++ b/lib/librte_distributor/rte_distributor.c
@@ -442,7 +442,7 @@ rte_distributor_process_v1705(struct rte_distributor *d,
/*
* Uncommenting the next line will cause the find_match
- * function to be optimised out, making this function
+ * function to be optimized out, making this function
* do parallel (non-atomic) distribution
*/
/* matches[j] = 0; */
@@ -536,7 +536,7 @@ MAP_STATIC_SYMBOL(int rte_distributor_returned_pkts(struct rte_distributor *d,
/*
* Return the number of packets in-flight in a distributor, i.e. packets
- * being workered on or queued up in a backlog.
+ * being worked on or queued up in a backlog.
*/
static inline unsigned int
total_outstanding(const struct rte_distributor *d)
@@ -663,7 +663,7 @@ rte_distributor_create_v1705(const char *name,
#endif
/*
- * Set up the backog tags so they're pointing at the second cache
+ * Set up the backlog tags so they're pointing at the second cache
* line for performance during flow matching
*/
for (i = 0 ; i < num_workers ; i++)
diff --git a/lib/librte_distributor/rte_distributor.h b/lib/librte_distributor/rte_distributor.h
index 9b9efdbe..cbeed04d 100644
--- a/lib/librte_distributor/rte_distributor.h
+++ b/lib/librte_distributor/rte_distributor.h
@@ -71,7 +71,7 @@ struct rte_mbuf;
* @param alg_type
* Call the legacy API, or use the new burst API. legacy uses 32-bit
* flow ID, and works on a single packet at a time. Latest uses 15-
- * bit flow ID and works on up to 8 packets at a time to worers.
+ * bit flow ID and works on up to 8 packets at a time to workers.
* @return
* The newly created distributor instance
*/
diff --git a/lib/librte_distributor/rte_distributor_private.h b/lib/librte_distributor/rte_distributor_private.h
index 250b23e1..24f41b95 100644
--- a/lib/librte_distributor/rte_distributor_private.h
+++ b/lib/librte_distributor/rte_distributor_private.h
@@ -90,7 +90,7 @@ union rte_distributor_buffer_v20 {
/*
* Transfer up to 8 mbufs at a time to/from workers, and
- * flow matching algorithm optimised for 8 flow IDs at a time
+ * flow matching algorithm optimized for 8 flow IDs at a time
*/
#define RTE_DIST_BURST_SIZE 8
diff --git a/lib/librte_distributor/rte_distributor_v20.c b/lib/librte_distributor/rte_distributor_v20.c
index 9adda52b..5be6efd4 100644
--- a/lib/librte_distributor/rte_distributor_v20.c
+++ b/lib/librte_distributor/rte_distributor_v20.c
@@ -345,7 +345,8 @@ rte_distributor_returned_pkts_v20(struct rte_distributor_v20 *d,
VERSION_SYMBOL(rte_distributor_returned_pkts, _v20, 2.0);
/* return the number of packets in-flight in a distributor, i.e. packets
- * being workered on or queued up in a backlog. */
+ * being worked on or queued up in a backlog.
+ */
static inline unsigned
total_outstanding(const struct rte_distributor_v20 *d)
{
diff --git a/lib/librte_eal/common/eal_common_log.c b/lib/librte_eal/common/eal_common_log.c
index be404136..e894b75e 100644
--- a/lib/librte_eal/common/eal_common_log.c
+++ b/lib/librte_eal/common/eal_common_log.c
@@ -249,7 +249,7 @@ static const struct logtype logtype_strings[] = {
{RTE_LOGTYPE_USER8, "user8"}
};
-/* Logging should be first initialzer (before drivers and bus) */
+/* Logging should be first initializer (before drivers and bus) */
RTE_INIT_PRIO(rte_log_init, 101);
static void
rte_log_init(void)
diff --git a/lib/librte_eal/common/include/arch/arm/rte_memcpy_32.h b/lib/librte_eal/common/include/arch/arm/rte_memcpy_32.h
index c3a26192..e4dafda1 100644
--- a/lib/librte_eal/common/include/arch/arm/rte_memcpy_32.h
+++ b/lib/librte_eal/common/include/arch/arm/rte_memcpy_32.h
@@ -225,7 +225,7 @@ rte_memcpy_func(void *dst, const void *src, size_t n)
* We split the remaining bytes (which will be less than 256) into
* 64byte (2^6) chunks.
* Using incrementing integers in the case labels of a switch statement
- * enourages the compiler to use a jump table. To get incrementing
+ * encourages the compiler to use a jump table. To get incrementing
* integers, we shift the 2 relevant bits to the LSB position to first
* get decrementing integers, and then subtract.
*/
diff --git a/lib/librte_eal/common/include/arch/ppc_64/rte_memcpy.h b/lib/librte_eal/common/include/arch/ppc_64/rte_memcpy.h
index ca9d1dc5..75f74897 100644
--- a/lib/librte_eal/common/include/arch/ppc_64/rte_memcpy.h
+++ b/lib/librte_eal/common/include/arch/ppc_64/rte_memcpy.h
@@ -164,7 +164,7 @@ rte_memcpy_func(void *dst, const void *src, size_t n)
* We split the remaining bytes (which will be less than 256) into
* 64byte (2^6) chunks.
* Using incrementing integers in the case labels of a switch statement
- * enourages the compiler to use a jump table. To get incrementing
+ * encourages the compiler to use a jump table. To get incrementing
* integers, we shift the 2 relevant bits to the LSB position to first
* get decrementing integers, and then subtract.
*/
diff --git a/lib/librte_eal/common/include/rte_eal.h b/lib/librte_eal/common/include/rte_eal.h
index 09b66819..8e4e71cc 100644
--- a/lib/librte_eal/common/include/rte_eal.h
+++ b/lib/librte_eal/common/include/rte_eal.h
@@ -217,7 +217,7 @@ int rte_eal_primary_proc_alive(const char *config_file_path);
/**
* Usage function typedef used by the application usage function.
*
- * Use this function typedef to define and call rte_set_applcation_usage_hook()
+ * Use this function typedef to define and call rte_set_application_usage_hook()
* routine.
*/
typedef void (*rte_usage_hook_t)(const char * prgname);
diff --git a/lib/librte_eal/common/include/rte_log.h b/lib/librte_eal/common/include/rte_log.h
index 16564d41..6c2d3566 100644
--- a/lib/librte_eal/common/include/rte_log.h
+++ b/lib/librte_eal/common/include/rte_log.h
@@ -218,7 +218,7 @@ int rte_log_cur_msg_logtype(void);
* The string identifying the log type.
* @return
* - >0: success, the returned value is the log type identifier.
- * - (-ENONEM): cannot allocate memory.
+ * - (-ENOMEM): cannot allocate memory.
*/
int rte_log_register(const char *name);
diff --git a/lib/librte_eal/common/include/rte_random.h b/lib/librte_eal/common/include/rte_random.h
index 24ae8363..aeff1f05 100644
--- a/lib/librte_eal/common/include/rte_random.h
+++ b/lib/librte_eal/common/include/rte_random.h
@@ -88,4 +88,4 @@ rte_rand(void)
#endif
-#endif /* _RTE_PER_LCORE_H_ */
+#endif /* _RTE_RANDOM_H_ */
diff --git a/lib/librte_eal/common/include/rte_version.h b/lib/librte_eal/common/include/rte_version.h
index d08cf48a..fa018074 100644
--- a/lib/librte_eal/common/include/rte_version.h
+++ b/lib/librte_eal/common/include/rte_version.h
@@ -78,7 +78,7 @@ extern "C" {
* 0-15 = release candidates
* 16 = release
*/
-#define RTE_VER_RELEASE 3
+#define RTE_VER_RELEASE 4
/**
* Macro to compute a version number usable for comparisons
diff --git a/lib/librte_eal/common/malloc_elem.c b/lib/librte_eal/common/malloc_elem.c
index 889dffd2..98bcd37b 100644
--- a/lib/librte_eal/common/malloc_elem.c
+++ b/lib/librte_eal/common/malloc_elem.c
@@ -252,7 +252,7 @@ malloc_elem_alloc(struct malloc_elem *elem, size_t size, unsigned align,
}
/*
- * joing two struct malloc_elem together. elem1 and elem2 must
+ * join two struct malloc_elem together. elem1 and elem2 must
* be contiguous in memory.
*/
static inline void
diff --git a/lib/librte_eal/common/rte_service.c b/lib/librte_eal/common/rte_service.c
index 09b758c9..ae97e6b7 100644
--- a/lib/librte_eal/common/rte_service.c
+++ b/lib/librte_eal/common/rte_service.c
@@ -153,7 +153,7 @@ service_valid(uint32_t id)
service = &rte_services[id]; \
} while (0)
-/* returns 1 if statistics should be colleced for service
+/* returns 1 if statistics should be collected for service
* Returns 0 if statistics should not be collected for service
*/
static inline int
diff --git a/lib/librte_eal/linuxapp/eal/eal_memory.c b/lib/librte_eal/linuxapp/eal/eal_memory.c
index a54b822a..16a181c3 100644
--- a/lib/librte_eal/linuxapp/eal/eal_memory.c
+++ b/lib/librte_eal/linuxapp/eal/eal_memory.c
@@ -344,7 +344,7 @@ void numa_error(char *where)
* hugetlbfs, then mmap() hugepage_sz data in it. If orig is set, the
* virtual address is stored in hugepg_tbl[i].orig_va, else it is stored
* in hugepg_tbl[i].final_va. The second mapping (when orig is 0) tries to
- * map continguous physical blocks in contiguous virtual blocks.
+ * map contiguous physical blocks in contiguous virtual blocks.
*/
static unsigned
map_all_hugepages(struct hugepage_file *hugepg_tbl, struct hugepage_info *hpi,
diff --git a/lib/librte_eal/linuxapp/eal/eal_timer.c b/lib/librte_eal/linuxapp/eal/eal_timer.c
index 24349dab..a616928b 100644
--- a/lib/librte_eal/linuxapp/eal/eal_timer.c
+++ b/lib/librte_eal/linuxapp/eal/eal_timer.c
@@ -113,7 +113,7 @@ static pthread_t msb_inc_thread_id;
/*
* This function runs on a specific thread to update a global variable
- * containing used to process MSB of the HPET (unfortunatelly, we need
+ * containing used to process MSB of the HPET (unfortunately, we need
* this because hpet is 32 bits by default under linux).
*/
static void
diff --git a/lib/librte_eal/linuxapp/kni/ethtool/igb/e1000_82575.c b/lib/librte_eal/linuxapp/kni/ethtool/igb/e1000_82575.c
index 1c30d12b..5da7f91f 100644
--- a/lib/librte_eal/linuxapp/kni/ethtool/igb/e1000_82575.c
+++ b/lib/librte_eal/linuxapp/kni/ethtool/igb/e1000_82575.c
@@ -241,7 +241,7 @@ static s32 e1000_init_phy_params_82575(struct e1000_hw *hw)
else
phy->ops.get_cable_length = e1000_get_cable_length_m88;
phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_m88;
- /* Check if this PHY is confgured for media swap. */
+ /* Check if this PHY is configured for media swap. */
if (phy->id == M88E1112_E_PHY_ID) {
u16 data;
diff --git a/lib/librte_efd/rte_efd.c b/lib/librte_efd/rte_efd.c
index 8771d042..7d0b5cc6 100644
--- a/lib/librte_efd/rte_efd.c
+++ b/lib/librte_efd/rte_efd.c
@@ -952,7 +952,7 @@ revert_groups(struct efd_offline_group_rules *previous_group,
* This operation was still successful, and entry contains a valid update
* RTE_EFD_UPDATE_FAILED
* Either the EFD failed to find a suitable perfect hash or the group was full
- * This is a fatal error, and the table is now in an indeterminite state
+ * This is a fatal error, and the table is now in an indeterminate state
* RTE_EFD_UPDATE_NO_CHANGE
* Operation resulted in no change to the table (same value already exists)
* 0
diff --git a/lib/librte_ether/rte_ethdev.h b/lib/librte_ether/rte_ethdev.h
index 18e474db..341c2d62 100644
--- a/lib/librte_ether/rte_ethdev.h
+++ b/lib/librte_ether/rte_ethdev.h
@@ -1062,7 +1062,7 @@ struct rte_eth_rxq_info {
/**
* Ethernet device TX queue information structure.
- * Used to retieve information about configured queue.
+ * Used to retrieve information about configured queue.
*/
struct rte_eth_txq_info {
struct rte_eth_txconf conf; /**< queue config parameters. */
diff --git a/lib/librte_ether/rte_tm_driver.h b/lib/librte_ether/rte_tm_driver.h
index b2e8ccf8..2376943d 100644
--- a/lib/librte_ether/rte_tm_driver.h
+++ b/lib/librte_ether/rte_tm_driver.h
@@ -183,7 +183,7 @@ typedef int (*rte_tm_node_stats_update_t)(struct rte_eth_dev *dev,
typedef int (*rte_tm_node_wfq_weight_mode_update_t)(
struct rte_eth_dev *dev,
uint32_t node_id,
- int *wfq_weigth_mode,
+ int *wfq_weight_mode,
uint32_t n_sp_priorities,
struct rte_tm_error *error);
diff --git a/lib/librte_gro/gro_tcp4.h b/lib/librte_gro/gro_tcp4.h
index f41dcee3..0a817162 100644
--- a/lib/librte_gro/gro_tcp4.h
+++ b/lib/librte_gro/gro_tcp4.h
@@ -116,7 +116,7 @@ struct gro_tcp4_tbl {
* This function creates a TCP/IPv4 reassembly table.
*
* @param socket_id
- * socket index for allocating TCP/IPv4 reassemblt table
+ * socket index for allocating TCP/IPv4 reassemble table
* @param max_flow_num
* the maximum number of flows in the TCP/IPv4 GRO table
* @param max_item_per_flow
diff --git a/lib/librte_gso/rte_gso.h b/lib/librte_gso/rte_gso.h
index 4b77176f..dbaedec7 100644
--- a/lib/librte_gso/rte_gso.h
+++ b/lib/librte_gso/rte_gso.h
@@ -103,7 +103,7 @@ struct rte_gso_ctx {
* Before calling rte_gso_segment(), applications must set proper ol_flags
* for the packet. The GSO library uses the same macros as that of TSO.
* For example, set PKT_TX_TCP_SEG and PKT_TX_IPV4 in ol_flags to segment
- * a TCP/IPv4 packet. If rte_gso_segment() succceds, the PKT_TX_TCP_SEG
+ * a TCP/IPv4 packet. If rte_gso_segment() succeeds, the PKT_TX_TCP_SEG
* flag is removed for all GSO segments and the input packet.
*
* Each of the newly-created GSO segments is organized as a two-segment
diff --git a/lib/librte_ip_frag/ip_frag_internal.c b/lib/librte_ip_frag/ip_frag_internal.c
index 09b755c9..46c44fff 100644
--- a/lib/librte_ip_frag/ip_frag_internal.c
+++ b/lib/librte_ip_frag/ip_frag_internal.c
@@ -160,7 +160,7 @@ ip_frag_process(struct ip_frag_pkt *fp, struct rte_ip_frag_death_row *dr,
}
/*
- * errorneous packet: either exceeed max allowed number of fragments,
+ * erroneous packet: either exceed max allowed number of fragments,
* or duplicate first/last fragment encountered.
*/
if (idx >= sizeof (fp->frags) / sizeof (fp->frags[0])) {
diff --git a/lib/librte_ip_frag/rte_ip_frag.h b/lib/librte_ip_frag/rte_ip_frag.h
index 35d0ecc3..9f8cede8 100644
--- a/lib/librte_ip_frag/rte_ip_frag.h
+++ b/lib/librte_ip_frag/rte_ip_frag.h
@@ -70,7 +70,7 @@ struct ip_frag {
struct rte_mbuf *mb; /**< fragment mbuf */
};
-/** @internal <src addr, dst_addr, id> to uniquely indetify fragmented datagram. */
+/** @internal <src addr, dst_addr, id> to uniquely identify fragmented datagram. */
struct ip_frag_key {
uint64_t src_dst[4]; /**< src address, first 8 bytes used for IPv4 */
uint32_t id; /**< dst address */
@@ -118,7 +118,7 @@ struct rte_ip_frag_tbl {
uint32_t entry_mask; /**< hash value mask. */
uint32_t max_entries; /**< max entries allowed. */
uint32_t use_entries; /**< entries in use. */
- uint32_t bucket_entries; /**< hash assocaitivity. */
+ uint32_t bucket_entries; /**< hash associativity. */
uint32_t nb_entries; /**< total size of the table. */
uint32_t nb_buckets; /**< num of associativity lines. */
struct ip_frag_pkt *last; /**< last used entry. */
@@ -303,7 +303,7 @@ int32_t rte_ipv4_fragment_packet(struct rte_mbuf *pkt_in,
* @param ip_hdr
* Pointer to the IPV4 header inside the fragment.
* @return
- * Pointer to mbuf for reassebled packet, or NULL if:
+ * Pointer to mbuf for reassembled packet, or NULL if:
* - an error occurred.
* - not all fragments of the packet are collected yet.
*/
diff --git a/lib/librte_ip_frag/rte_ipv4_reassembly.c b/lib/librte_ip_frag/rte_ipv4_reassembly.c
index b1330896..040bd70a 100644
--- a/lib/librte_ip_frag/rte_ipv4_reassembly.c
+++ b/lib/librte_ip_frag/rte_ipv4_reassembly.c
@@ -93,7 +93,7 @@ ipv4_frag_reassemble(struct ip_frag_pkt *fp)
/* update mbuf fields for reassembled packet. */
m->ol_flags |= PKT_TX_IP_CKSUM;
- /* update ipv4 header for the reassmebled packet */
+ /* update ipv4 header for the reassembled packet */
ip_hdr = rte_pktmbuf_mtod_offset(m, struct ipv4_hdr *, m->l2_len);
ip_hdr->total_length = rte_cpu_to_be_16((uint16_t)(fp->total_size +
@@ -117,7 +117,7 @@ ipv4_frag_reassemble(struct ip_frag_pkt *fp)
* @param ip_hdr
* Pointer to the IPV4 header inside the fragment.
* @return
- * Pointer to mbuf for reassebled packet, or NULL if:
+ * Pointer to mbuf for reassembled packet, or NULL if:
* - an error occurred.
* - not all fragments of the packet are collected yet.
*/
diff --git a/lib/librte_jobstats/rte_jobstats.h b/lib/librte_jobstats/rte_jobstats.h
index 70e034ca..e1591562 100644
--- a/lib/librte_jobstats/rte_jobstats.h
+++ b/lib/librte_jobstats/rte_jobstats.h
@@ -313,7 +313,7 @@ rte_jobstats_set_max(struct rte_jobstats *job, uint64_t period);
*
* @param job
* Job object.
- * @param update_pedriod_cb
+ * @param update_period_cb
* Callback to set. If NULL restore default update function.
*/
void
diff --git a/lib/librte_kni/rte_kni.c b/lib/librte_kni/rte_kni.c
index 5ee38e9a..8eca8c03 100644
--- a/lib/librte_kni/rte_kni.c
+++ b/lib/librte_kni/rte_kni.c
@@ -340,7 +340,7 @@ rte_kni_alloc(struct rte_mempool *pktmbuf_pool,
/* Get an available slot from the pool */
slot = kni_memzone_pool_alloc();
if (!slot) {
- RTE_LOG(ERR, KNI, "Cannot allocate more KNI interfaces; increase the number of max_kni_ifaces(current %d) or release unusued ones.\n",
+ RTE_LOG(ERR, KNI, "Cannot allocate more KNI interfaces; increase the number of max_kni_ifaces(current %d) or release unused ones.\n",
kni_memzone_pool.max_ifaces);
return NULL;
}
@@ -659,7 +659,7 @@ kni_allocate_mbufs(struct rte_kni *kni)
phys[i] = va2pa(pkts[i]);
}
- /* No pkt mbuf alocated */
+ /* No pkt mbuf allocated */
if (i <= 0)
return;
diff --git a/lib/librte_kni/rte_kni.h b/lib/librte_kni/rte_kni.h
index d1950791..d43b5b28 100644
--- a/lib/librte_kni/rte_kni.h
+++ b/lib/librte_kni/rte_kni.h
@@ -228,7 +228,7 @@ const char *rte_kni_get_name(const struct rte_kni *kni);
* @param kni
* pointer to struct rte_kni.
* @param ops
- * ponter to struct rte_kni_ops.
+ * pointer to struct rte_kni_ops.
*
* @return
* On success: 0
diff --git a/lib/librte_kni/rte_kni_fifo.h b/lib/librte_kni/rte_kni_fifo.h
index c7cd5c26..6f2c3cb3 100644
--- a/lib/librte_kni/rte_kni_fifo.h
+++ b/lib/librte_kni/rte_kni_fifo.h
@@ -73,7 +73,7 @@ kni_fifo_put(struct rte_kni_fifo *fifo, void **data, unsigned num)
}
/**
- * Get up to num elements from the fifo. Return the number actully read
+ * Get up to num elements from the fifo. Return the number actually read
*/
static inline unsigned
kni_fifo_get(struct rte_kni_fifo *fifo, void **data, unsigned num)
diff --git a/lib/librte_mbuf/rte_mbuf.h b/lib/librte_mbuf/rte_mbuf.h
index 6d91f7d3..7e326bbc 100644
--- a/lib/librte_mbuf/rte_mbuf.h
+++ b/lib/librte_mbuf/rte_mbuf.h
@@ -850,10 +850,10 @@ rte_mbuf_sanity_check(const struct rte_mbuf *m, int is_header);
} while (0)
/**
- * Allocate an unitialized mbuf from mempool *mp*.
+ * Allocate an uninitialized mbuf from mempool *mp*.
*
* This function can be used by PMDs (especially in RX functions) to
- * allocate an unitialized mbuf. The driver is responsible of
+ * allocate an uninitialized mbuf. The driver is responsible of
* initializing all the required fields. See rte_pktmbuf_reset().
* For standard needs, prefer rte_pktmbuf_alloc().
*
@@ -1778,7 +1778,7 @@ const void *__rte_pktmbuf_read(const struct rte_mbuf *m, uint32_t off,
* @param len
* The amount of bytes to read.
* @param buf
- * The buffer where data is copied if it is not contigous in mbuf
+ * The buffer where data is copied if it is not contiguous in mbuf
* data. Its length should be at least equal to the len parameter.
* @return
* The pointer to the data, either in the mbuf if it is contiguous,
diff --git a/lib/librte_net/net_crc_neon.h b/lib/librte_net/net_crc_neon.h
index 201b2c88..cb8f63d9 100644
--- a/lib/librte_net/net_crc_neon.h
+++ b/lib/librte_net/net_crc_neon.h
@@ -64,7 +64,7 @@ struct crc_pmull_ctx crc16_ccitt_pmull __rte_aligned(16);
* FOLD = XOR(T1, T2, DATA)
*
* @param data_block 16 byte data block
- * @param precomp precomputed rk1 constanst
+ * @param precomp precomputed rk1 constant
* @param fold running 16 byte folded data
*
* @return New 16 byte folded data
diff --git a/lib/librte_net/net_crc_sse.h b/lib/librte_net/net_crc_sse.h
index ac93637b..7eae1479 100644
--- a/lib/librte_net/net_crc_sse.h
+++ b/lib/librte_net/net_crc_sse.h
@@ -66,7 +66,7 @@ struct crc_pclmulqdq_ctx crc16_ccitt_pclmulqdq __rte_aligned(16);
* @param data_block
* 16 byte data block
* @param precomp
- * Precomputed rk1 constanst
+ * Precomputed rk1 constant
* @param fold
* Current16 byte folded data
*
diff --git a/lib/librte_net/rte_ip.h b/lib/librte_net/rte_ip.h
index 4491b86e..73ec398f 100644
--- a/lib/librte_net/rte_ip.h
+++ b/lib/librte_net/rte_ip.h
@@ -237,7 +237,7 @@ rte_raw_cksum(const void *buf, size_t len)
* @param off
* The offset in bytes to start the checksum.
* @param len
- * The length in bytes of the data to ckecksum.
+ * The length in bytes of the data to checksum.
* @param cksum
* A pointer to the checksum, filled on success.
* @return
diff --git a/lib/librte_pdump/rte_pdump.c b/lib/librte_pdump/rte_pdump.c
index e6182d35..29a6c99b 100644
--- a/lib/librte_pdump/rte_pdump.c
+++ b/lib/librte_pdump/rte_pdump.c
@@ -153,6 +153,8 @@ pdump_pktmbuf_copy(struct rte_mbuf *m, struct rte_mempool *mp)
do {
nseg++;
if (pdump_pktmbuf_copy_data(seg, m) < 0) {
+ if (seg != m_dup)
+ rte_pktmbuf_free_seg(seg);
rte_pktmbuf_free(m_dup);
return NULL;
}
@@ -225,7 +227,7 @@ pdump_tx(uint16_t port __rte_unused, uint16_t qidx __rte_unused,
}
static int
-pdump_regitser_rx_callbacks(uint16_t end_q, uint16_t port, uint16_t queue,
+pdump_register_rx_callbacks(uint16_t end_q, uint16_t port, uint16_t queue,
struct rte_ring *ring, struct rte_mempool *mp,
uint16_t operation)
{
@@ -279,7 +281,7 @@ pdump_regitser_rx_callbacks(uint16_t end_q, uint16_t port, uint16_t queue,
}
static int
-pdump_regitser_tx_callbacks(uint16_t end_q, uint16_t port, uint16_t queue,
+pdump_register_tx_callbacks(uint16_t end_q, uint16_t port, uint16_t queue,
struct rte_ring *ring, struct rte_mempool *mp,
uint16_t operation)
{
@@ -400,7 +402,7 @@ set_pdump_rxtx_cbs(struct pdump_request *p)
/* register RX callback */
if (flags & RTE_PDUMP_FLAG_RX) {
end_q = (queue == RTE_PDUMP_ALL_QUEUES) ? nb_rx_q : queue + 1;
- ret = pdump_regitser_rx_callbacks(end_q, port, queue, ring, mp,
+ ret = pdump_register_rx_callbacks(end_q, port, queue, ring, mp,
operation);
if (ret < 0)
return ret;
@@ -409,7 +411,7 @@ set_pdump_rxtx_cbs(struct pdump_request *p)
/* register TX callback */
if (flags & RTE_PDUMP_FLAG_TX) {
end_q = (queue == RTE_PDUMP_ALL_QUEUES) ? nb_tx_q : queue + 1;
- ret = pdump_regitser_tx_callbacks(end_q, port, queue, ring, mp,
+ ret = pdump_register_tx_callbacks(end_q, port, queue, ring, mp,
operation);
if (ret < 0)
return ret;
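Besides the spelling rename of the callback-registration helpers, the pdump_pktmbuf_copy() hunk closes a small leak: the segment currently being filled is linked into the duplicate chain only after its data has been copied, so when pdump_pktmbuf_copy_data() fails, rte_pktmbuf_free(m_dup) never reaches that segment and it must be released on its own (unless it is m_dup itself, i.e. the first segment). A sketch of the failure path, with names taken from the patch context:

/* Sketch: 'seg' is not yet reachable from 'm_dup' here, so it has to be
 * freed separately before the partial chain. */
if (pdump_pktmbuf_copy_data(seg, m) < 0) {
	if (seg != m_dup)
		rte_pktmbuf_free_seg(seg);	/* orphaned segment */
	rte_pktmbuf_free(m_dup);		/* chain built so far */
	return NULL;
}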
diff --git a/lib/librte_pipeline/rte_pipeline.h b/lib/librte_pipeline/rte_pipeline.h
index f3663483..fdc44a79 100644
--- a/lib/librte_pipeline/rte_pipeline.h
+++ b/lib/librte_pipeline/rte_pipeline.h
@@ -483,7 +483,7 @@ int rte_pipeline_table_entry_delete(struct rte_pipeline *p,
* @param keys
* Array containing table entry keys
* @param entries
- * Array containung new contents for every table entry identified by key
+ * Array containing new contents for every table entry identified by key
* @param n_keys
* Number of keys to add
* @param key_found
diff --git a/lib/librte_power/rte_power_acpi_cpufreq.c b/lib/librte_power/rte_power_acpi_cpufreq.c
index 01ac5acb..6b0cdb2e 100644
--- a/lib/librte_power/rte_power_acpi_cpufreq.c
+++ b/lib/librte_power/rte_power_acpi_cpufreq.c
@@ -267,7 +267,7 @@ power_get_available_freqs(struct rte_power_info *pi)
}
ret = 0;
- POWER_DEBUG_TRACE("%d frequencie(s) of lcore %u are available\n",
+ POWER_DEBUG_TRACE("%d frequency(s) of lcore %u are available\n",
count, pi->lcore_id);
out:
fclose(f);
@@ -359,7 +359,7 @@ rte_power_acpi_cpufreq_init(unsigned lcore_id)
}
RTE_LOG(INFO, POWER, "Initialized successfully for lcore %u "
- "power manamgement\n", lcore_id);
+ "power management\n", lcore_id);
rte_atomic32_cmpset(&(pi->state), POWER_ONGOING, POWER_USED);
return 0;
diff --git a/lib/librte_power/rte_power_acpi_cpufreq.h b/lib/librte_power/rte_power_acpi_cpufreq.h
index eee0ca0a..bc20dfd6 100644
--- a/lib/librte_power/rte_power_acpi_cpufreq.h
+++ b/lib/librte_power/rte_power_acpi_cpufreq.h
@@ -180,7 +180,7 @@ int rte_power_acpi_cpufreq_freq_max(unsigned lcore_id);
*
* @return
* - 1 on success with frequency changed.
- * - 0 on success without frequency chnaged.
+ * - 0 on success without frequency changed.
* - Negative on error.
*/
int rte_power_acpi_cpufreq_freq_min(unsigned lcore_id);
diff --git a/lib/librte_reorder/rte_reorder.h b/lib/librte_reorder/rte_reorder.h
index 4cd8de76..dc83f8e6 100644
--- a/lib/librte_reorder/rte_reorder.h
+++ b/lib/librte_reorder/rte_reorder.h
@@ -147,9 +147,9 @@ rte_reorder_free(struct rte_reorder_buffer *b);
* -1 on error
* On error case, rte_errno will be set appropriately:
* - ENOSPC - Cannot move existing mbufs from reorder buffer to accommodate
- * ealry mbuf, but it can be accommodated by performing drain and then insert.
+ * early mbuf, but it can be accommodated by performing drain and then insert.
* - ERANGE - Too early or late mbuf which is vastly out of range of expected
- * window should be ingnored without any handling.
+ * window should be ignored without any handling.
*/
int
rte_reorder_insert(struct rte_reorder_buffer *b, struct rte_mbuf *mbuf);
diff --git a/lib/librte_ring/rte_ring.h b/lib/librte_ring/rte_ring.h
index 5e9b3b7b..e9244381 100644
--- a/lib/librte_ring/rte_ring.h
+++ b/lib/librte_ring/rte_ring.h
@@ -409,6 +409,12 @@ __rte_ring_move_prod_head(struct rte_ring *r, int is_sp,
n = max;
*old_head = r->prod.head;
+
+ /* add rmb barrier to avoid load/load reorder in weak
+ * memory model. It is noop on x86
+ */
+ rte_smp_rmb();
+
const uint32_t cons_tail = r->cons.tail;
/*
* The subtraction is done between two unsigned 32bits value
@@ -517,6 +523,12 @@ __rte_ring_move_cons_head(struct rte_ring *r, int is_sc,
n = max;
*old_head = r->cons.head;
+
+ /* add rmb barrier to avoid load/load reorder in weak
+ * memory model. It is noop on x86
+ */
+ rte_smp_rmb();
+
const uint32_t prod_tail = r->prod.tail;
/* The subtraction is done between two unsigned 32bits value
* (the result is always modulo 32 bits even if we have
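The rte_smp_rmb() calls added to __rte_ring_move_prod_head() and __rte_ring_move_cons_head() pin down the load ordering the lock-free ring depends on: each side must observe its own head index no later than the opposing tail. Reading a stale opposing tail merely under-estimates the space or entries available, but on weakly ordered CPUs (arm, ppc) the two loads can be reordered and the computed count over-estimated, letting one side advance over entries the other has not yet released; on x86 the barrier reduces to a compiler barrier, hence the "noop" remark in the comment. A simplified sketch of the producer-side check, using a stand-in for the internal ring fields:

#include <stdint.h>
#include <rte_atomic.h>

/* Stand-in for the relevant ring fields (sketch only, not rte_ring). */
struct ring_headtail_sketch { volatile uint32_t head, tail; };
struct ring_sketch {
	uint32_t capacity;
	struct ring_headtail_sketch prod, cons;
};

static inline uint32_t
prod_free_entries(const struct ring_sketch *r, uint32_t *old_head)
{
	*old_head = r->prod.head;		/* own index first */
	rte_smp_rmb();				/* forbid load/load reordering */
	uint32_t cons_tail = r->cons.tail;	/* opposing index second */

	/* Unsigned arithmetic keeps this correct across wrap-around. */
	return r->capacity + cons_tail - *old_head;
}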
diff --git a/lib/librte_sched/rte_red.h b/lib/librte_sched/rte_red.h
index ca122275..6edf914f 100644
--- a/lib/librte_sched/rte_red.h
+++ b/lib/librte_sched/rte_red.h
@@ -139,7 +139,7 @@ rte_red_config_init(struct rte_red_config *red_cfg,
/**
* @brief Generate random number for RED
*
- * Implemenetation based on:
+ * Implementation based on:
* http://software.intel.com/en-us/articles/fast-random-number-generator-on-the-intel-pentiumr-4-processor/
*
* 10 bit shift has been found through empirical tests (was 16).
@@ -200,7 +200,7 @@ __rte_red_calc_qempty_factor(uint8_t wq_log2, uint16_t m)
* Now using basic math we compute 2^n:
* 2^(f+n) = 2^f * 2^n
* 2^f - we use lookup table
- * 2^n - can be replaced with bit shift right oeprations
+ * 2^n - can be replaced with bit shift right operations
*/
f = (n >> 6) & 0xf;
diff --git a/lib/librte_sched/rte_sched.c b/lib/librte_sched/rte_sched.c
index a2d0d685..7252f850 100644
--- a/lib/librte_sched/rte_sched.c
+++ b/lib/librte_sched/rte_sched.c
@@ -1020,7 +1020,7 @@ rte_sched_subport_read_stats(struct rte_sched_port *port,
memcpy(stats, &s->stats, sizeof(struct rte_sched_subport_stats));
memset(&s->stats, 0, sizeof(struct rte_sched_subport_stats));
- /* Subport TC ovesubscription status */
+ /* Subport TC oversubscription status */
*tc_ov = s->tc_ov;
return 0;
diff --git a/lib/librte_security/rte_security.h b/lib/librte_security/rte_security.h
index 7e687d29..653929b9 100644
--- a/lib/librte_security/rte_security.h
+++ b/lib/librte_security/rte_security.h
@@ -266,6 +266,7 @@ struct rte_security_session_conf {
/**< Type of action to be performed on the session */
enum rte_security_session_protocol protocol;
/**< Security protocol to be configured */
+ RTE_STD_C11
union {
struct rte_security_ipsec_xform ipsec;
struct rte_security_macsec_xform macsec;
@@ -406,6 +407,7 @@ struct rte_security_stats {
enum rte_security_session_protocol protocol;
/**< Security protocol to be configured */
+ RTE_STD_C11
union {
struct rte_security_macsec_stats macsec;
struct rte_security_ipsec_stats ipsec;
@@ -486,6 +488,7 @@ struct rte_security_capability_idx {
enum rte_security_session_action_type action;
enum rte_security_session_protocol protocol;
+ RTE_STD_C11
union {
struct {
enum rte_security_ipsec_sa_protocol proto;
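The RTE_STD_C11 markers added to rte_security.h sit in front of unnamed unions: anonymous union members are a C11 feature, and the macro keeps the header usable from translation units built with older or stricter standard flags by falling back to the GNU __extension__ keyword there. A sketch of the intent (not the exact rte_common.h definition):

/* Sketch of the intent; the real macro lives in rte_common.h. */
#if !defined(__STDC_VERSION__) || __STDC_VERSION__ < 201112L
#define RTE_STD_C11 __extension__	/* pre-C11: lean on the GNU extension */
#else
#define RTE_STD_C11			/* C11 and newer: nothing needed */
#endif

struct example_conf {
	int type;
	RTE_STD_C11
	union {		/* unnamed union: C11, or GNU extension below C11 */
		int ipsec_like;
		long macsec_like;
	};
};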
diff --git a/lib/librte_timer/rte_timer.c b/lib/librte_timer/rte_timer.c
index 28decc39..88826f57 100644
--- a/lib/librte_timer/rte_timer.c
+++ b/lib/librte_timer/rte_timer.c
@@ -195,7 +195,7 @@ timer_set_running_state(struct rte_timer *tim)
/*
* Return a skiplist level for a new entry.
- * This probabalistically gives a level with p=1/4 that an entry at level n
+ * This probabilistically gives a level with p=1/4 that an entry at level n
* will also appear at level n+1.
*/
static uint32_t
diff --git a/test/test/autotest_test_funcs.py b/test/test/autotest_test_funcs.py
index 8da8fcd7..15fcb7cf 100644
--- a/test/test/autotest_test_funcs.py
+++ b/test/test/autotest_test_funcs.py
@@ -62,21 +62,30 @@ def dump_autotest(child, test_name):
def memory_autotest(child, test_name):
+ lines = 0
+ error = ''
child.sendline(test_name)
- regexp = "phys:0x[0-9a-f]*, len:([0-9]*), virt:0x[0-9a-f]*, " \
- "socket_id:[0-9]*"
- index = child.expect([regexp, pexpect.TIMEOUT], timeout=180)
- if index != 0:
- return -1, "Fail [Timeout]"
- size = int(child.match.groups()[0], 16)
- if size <= 0:
- return -1, "Fail [Bad size]"
- index = child.expect(["Test OK", "Test Failed",
- pexpect.TIMEOUT], timeout=10)
- if index == 1:
- return -1, "Fail"
- elif index == 2:
- return -1, "Fail [Timeout]"
+ while True:
+ regexp = "IOVA:0x[0-9a-f]*, len:([0-9]*), virt:0x[0-9a-f]*, " \
+ "socket_id:[0-9]*"
+ index = child.expect([regexp, "Test OK", "Test Failed",
+ pexpect.TIMEOUT], timeout=10)
+ if index == 3:
+ return -1, "Fail [Timeout]"
+ elif index == 1:
+ break
+ elif index == 2:
+ return -1, "Fail"
+ else:
+ lines = lines + 1
+ size = int(child.match.groups()[0], 10)
+ if size <= 0:
+ error = 'Bad size'
+
+ if lines <= 0:
+ return -1, "Fail [No entries]"
+ if error != '':
+ return -1, "Fail [{}]".format(error)
return 0, "Success"
diff --git a/test/test/test_memzone.c b/test/test/test_memzone.c
index c9394c4a..1cf235a9 100644
--- a/test/test/test_memzone.c
+++ b/test/test/test_memzone.c
@@ -176,6 +176,10 @@ test_memzone_reserve_flags(void)
printf("hugepage_sz not equal 2M\n");
return -1;
}
+ if (rte_memzone_free(mz)) {
+ printf("Fail memzone free\n");
+ return -1;
+ }
mz = rte_memzone_reserve("flag_zone_2M_HINT", size, SOCKET_ID_ANY,
RTE_MEMZONE_2MB|RTE_MEMZONE_SIZE_HINT_ONLY);
@@ -187,6 +191,10 @@ test_memzone_reserve_flags(void)
printf("hugepage_sz not equal 2M\n");
return -1;
}
+ if (rte_memzone_free(mz)) {
+ printf("Fail memzone free\n");
+ return -1;
+ }
/* Check if 1GB huge pages are unavailable, that function fails unless
* HINT flag is indicated
@@ -202,6 +210,10 @@ test_memzone_reserve_flags(void)
printf("hugepage_sz not equal 2M\n");
return -1;
}
+ if (rte_memzone_free(mz)) {
+ printf("Fail memzone free\n");
+ return -1;
+ }
mz = rte_memzone_reserve("flag_zone_1G", size, SOCKET_ID_ANY,
RTE_MEMZONE_1GB);
@@ -224,6 +236,10 @@ test_memzone_reserve_flags(void)
printf("hugepage_sz not equal 1G\n");
return -1;
}
+ if (rte_memzone_free(mz)) {
+ printf("Fail memzone free\n");
+ return -1;
+ }
mz = rte_memzone_reserve("flag_zone_1G_HINT", size, SOCKET_ID_ANY,
RTE_MEMZONE_1GB|RTE_MEMZONE_SIZE_HINT_ONLY);
@@ -235,6 +251,10 @@ test_memzone_reserve_flags(void)
printf("hugepage_sz not equal 1G\n");
return -1;
}
+ if (rte_memzone_free(mz)) {
+ printf("Fail memzone free\n");
+ return -1;
+ }
/* Check if 1GB huge pages are unavailable, that function fails unless
* HINT flag is indicated
@@ -250,12 +270,20 @@ test_memzone_reserve_flags(void)
printf("hugepage_sz not equal 1G\n");
return -1;
}
+ if (rte_memzone_free(mz)) {
+ printf("Fail memzone free\n");
+ return -1;
+ }
mz = rte_memzone_reserve("flag_zone_2M", size, SOCKET_ID_ANY,
RTE_MEMZONE_2MB);
if (mz != NULL) {
printf("MEMZONE FLAG 2MB\n");
return -1;
}
+ if (rte_memzone_free(mz)) {
+ printf("Fail memzone free\n");
+ return -1;
+ }
}
if (hugepage_2MB_avail && hugepage_1GB_avail) {
@@ -285,6 +313,10 @@ test_memzone_reserve_flags(void)
printf("hugepage_sz not equal 16M\n");
return -1;
}
+ if (rte_memzone_free(mz)) {
+ printf("Fail memzone free\n");
+ return -1;
+ }
mz = rte_memzone_reserve("flag_zone_16M_HINT", size,
SOCKET_ID_ANY, RTE_MEMZONE_16MB|RTE_MEMZONE_SIZE_HINT_ONLY);
@@ -296,6 +328,10 @@ test_memzone_reserve_flags(void)
printf("hugepage_sz not equal 16M\n");
return -1;
}
+ if (rte_memzone_free(mz)) {
+ printf("Fail memzone free\n");
+ return -1;
+ }
/* Check if 1GB huge pages are unavailable, that function fails
* unless HINT flag is indicated
@@ -312,6 +348,10 @@ test_memzone_reserve_flags(void)
printf("hugepage_sz not equal 16M\n");
return -1;
}
+ if (rte_memzone_free(mz)) {
+ printf("Fail memzone free\n");
+ return -1;
+ }
mz = rte_memzone_reserve("flag_zone_16G", size,
SOCKET_ID_ANY, RTE_MEMZONE_16GB);
@@ -333,6 +373,10 @@ test_memzone_reserve_flags(void)
printf("hugepage_sz not equal 16G\n");
return -1;
}
+ if (rte_memzone_free(mz)) {
+ printf("Fail memzone free\n");
+ return -1;
+ }
mz = rte_memzone_reserve("flag_zone_16G_HINT", size,
SOCKET_ID_ANY, RTE_MEMZONE_16GB|RTE_MEMZONE_SIZE_HINT_ONLY);
@@ -344,6 +388,10 @@ test_memzone_reserve_flags(void)
printf("hugepage_sz not equal 16G\n");
return -1;
}
+ if (rte_memzone_free(mz)) {
+ printf("Fail memzone free\n");
+ return -1;
+ }
/* Check if 1GB huge pages are unavailable, that function fails
* unless HINT flag is indicated
@@ -360,6 +408,10 @@ test_memzone_reserve_flags(void)
printf("hugepage_sz not equal 16G\n");
return -1;
}
+ if (rte_memzone_free(mz)) {
+ printf("Fail memzone free\n");
+ return -1;
+ }
mz = rte_memzone_reserve("flag_zone_16M", size,
SOCKET_ID_ANY, RTE_MEMZONE_16MB);
if (mz != NULL) {
@@ -434,6 +486,12 @@ test_memzone_reserve_max(void)
rte_memzone_dump(stdout);
return -1;
}
+
+ if (rte_memzone_free(mz)) {
+ printf("Fail memzone free\n");
+ return -1;
+ }
+
return 0;
}
@@ -473,6 +531,12 @@ test_memzone_reserve_max_aligned(void)
rte_memzone_dump(stdout);
return -1;
}
+
+ if (rte_memzone_free(mz)) {
+ printf("Fail memzone free\n");
+ return -1;
+ }
+
return 0;
}
@@ -593,6 +657,28 @@ test_memzone_aligned(void)
if (is_memory_overlap(memzone_aligned_512->iova, memzone_aligned_512->len,
memzone_aligned_1024->iova, memzone_aligned_1024->len))
return -1;
+
+ /* free all used zones */
+ if (rte_memzone_free(memzone_aligned_32)) {
+ printf("Fail memzone free\n");
+ return -1;
+ }
+ if (rte_memzone_free(memzone_aligned_128)) {
+ printf("Fail memzone free\n");
+ return -1;
+ }
+ if (rte_memzone_free(memzone_aligned_256)) {
+ printf("Fail memzone free\n");
+ return -1;
+ }
+ if (rte_memzone_free(memzone_aligned_512)) {
+ printf("Fail memzone free\n");
+ return -1;
+ }
+ if (rte_memzone_free(memzone_aligned_1024)) {
+ printf("Fail memzone free\n");
+ return -1;
+ }
return 0;
}
@@ -638,6 +724,11 @@ check_memzone_bounded(const char *name, uint32_t len, uint32_t align,
return -1;
}
+ if (rte_memzone_free(mz)) {
+ printf("Fail memzone free\n");
+ return -1;
+ }
+
return 0;
}
@@ -758,7 +849,7 @@ test_memzone_free(void)
}
static int
-test_memzone(void)
+test_memzone_basic(void)
{
const struct rte_memzone *memzone1;
const struct rte_memzone *memzone2;
@@ -837,6 +928,40 @@ test_memzone(void)
if (mz != NULL)
return -1;
+ if (rte_memzone_free(memzone1)) {
+ printf("Fail memzone free - memzone1\n");
+ return -1;
+ }
+ if (rte_memzone_free(memzone2)) {
+ printf("Fail memzone free - memzone2\n");
+ return -1;
+ }
+ if (memzone3 && rte_memzone_free(memzone3)) {
+ printf("Fail memzone free - memzone3\n");
+ return -1;
+ }
+ if (rte_memzone_free(memzone4)) {
+ printf("Fail memzone free - memzone4\n");
+ return -1;
+ }
+
+ return 0;
+}
+
+static int memzone_calk_called;
+static void memzone_walk_clb(const struct rte_memzone *mz __rte_unused,
+ void *arg __rte_unused)
+{
+ memzone_calk_called = 1;
+}
+
+static int
+test_memzone(void)
+{
+ printf("test basic memzone API\n");
+ if (test_memzone_basic() < 0)
+ return -1;
+
printf("test free memzone\n");
if (test_memzone_free() < 0)
return -1;
@@ -869,6 +994,14 @@ test_memzone(void)
if (test_memzone_reserve_max_aligned() < 0)
return -1;
+ printf("check memzone cleanup\n");
+ rte_memzone_walk(memzone_walk_clb, NULL);
+ if (memzone_calk_called) {
+ printf("there are some memzones left after test\n");
+ rte_memzone_dump(stdout);
+ return -1;
+ }
+
return 0;
}
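The test_memzone rework frees every zone it reserves and ends with an rte_memzone_walk() pass; since the callback runs once per remaining zone, any invocation after the sub-tests means something leaked. A small variant of the same idea that counts and names the leftovers instead of only setting a flag (hedged sketch, same callback signature as above):

/* Sketch: count and name leftover zones instead of only flagging them. */
static int leftover_zones;

static void
count_zone(const struct rte_memzone *mz, void *arg __rte_unused)
{
	printf("leftover memzone: %s\n", mz->name);
	leftover_zones++;
}

/* ... after all sub-tests have run: */
rte_memzone_walk(count_zone, NULL);
if (leftover_zones != 0)
	return -1;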
diff --git a/usertools/dpdk-devbind.py b/usertools/dpdk-devbind.py
index a539995c..f9f7aee0 100755
--- a/usertools/dpdk-devbind.py
+++ b/usertools/dpdk-devbind.py
@@ -539,7 +539,7 @@ def bind_all(dev_list, driver, force=False):
for d in dev_list:
bind_one(d, driver, force)
- # For kenels < 3.15 when binding devices to a generic driver
+ # For kernels < 3.15 when binding devices to a generic driver
# (i.e. one that doesn't have a PCI ID table) using new_id, some devices
# that are not bound to any other driver could be bound even if no one has
# asked them to. hence, we check the list of drivers again, and see if