Diffstat (limited to 'lib/librte_ether')
-rw-r--r--  lib/librte_ether/Makefile                   35
-rw-r--r--  lib/librte_ether/ethdev_profile.c           33
-rw-r--r--  lib/librte_ether/ethdev_profile.h           33
-rw-r--r--  lib/librte_ether/meson.build                27
-rw-r--r--  lib/librte_ether/rte_dev_info.h             33
-rw-r--r--  lib/librte_ether/rte_eth_ctrl.h             45
-rw-r--r--  lib/librte_ether/rte_ethdev.c             1089
-rw-r--r--  lib/librte_ether/rte_ethdev.h             2131
-rw-r--r--  lib/librte_ether/rte_ethdev_core.h         613
-rw-r--r--  lib/librte_ether/rte_ethdev_driver.h       132
-rw-r--r--  lib/librte_ether/rte_ethdev_pci.h            3
-rw-r--r--  lib/librte_ether/rte_ethdev_vdev.h           3
-rw-r--r--  lib/librte_ether/rte_ethdev_version.map     17
-rw-r--r--  lib/librte_ether/rte_flow.c                 69
-rw-r--r--  lib/librte_ether/rte_flow.h                 66
-rw-r--r--  lib/librte_ether/rte_flow_driver.h          34
-rw-r--r--  lib/librte_ether/rte_mtr.c                  58
-rw-r--r--  lib/librte_ether/rte_mtr.h                  26
-rw-r--r--  lib/librte_ether/rte_mtr_driver.h           33
-rw-r--r--  lib/librte_ether/rte_tm.c                   33
-rw-r--r--  lib/librte_ether/rte_tm_driver.h            33
21 files changed, 2562 insertions(+), 1984 deletions(-)
diff --git a/lib/librte_ether/Makefile b/lib/librte_ether/Makefile
index 394cc9c0..3ca5782b 100644
--- a/lib/librte_ether/Makefile
+++ b/lib/librte_ether/Makefile
@@ -1,33 +1,5 @@
-# BSD LICENSE
-#
-# Copyright(c) 2010-2017 Intel Corporation. All rights reserved.
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions
-# are met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above copyright
-# notice, this list of conditions and the following disclaimer in
-# the documentation and/or other materials provided with the
-# distribution.
-# * Neither the name of Intel Corporation nor the names of its
-# contributors may be used to endorse or promote products derived
-# from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2010-2017 Intel Corporation
include $(RTE_SDK)/mk/rte.vars.mk
@@ -36,6 +8,7 @@ include $(RTE_SDK)/mk/rte.vars.mk
#
LIB = librte_ethdev.a
+CFLAGS += -DALLOW_EXPERIMENTAL_API
CFLAGS += -O3
CFLAGS += $(WERROR_FLAGS)
LDLIBS += -lrte_net -lrte_eal -lrte_mempool -lrte_ring
@@ -55,6 +28,8 @@ SRCS-y += ethdev_profile.c
# Export include files
#
SYMLINK-y-include += rte_ethdev.h
+SYMLINK-y-include += rte_ethdev_driver.h
+SYMLINK-y-include += rte_ethdev_core.h
SYMLINK-y-include += rte_ethdev_pci.h
SYMLINK-y-include += rte_ethdev_vdev.h
SYMLINK-y-include += rte_eth_ctrl.h
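
The two headers newly exported above (rte_ethdev_driver.h and rte_ethdev_core.h) split the ethdev API between applications and drivers. As a rough illustration of what that means for a PMD (this sketch is not part of the patch; the probe function and device name are made up), a driver source would now include the driver-facing header and obtain the internal port allocator from it:

/* Illustrative sketch only -- not part of this patch. */
#include <errno.h>
#include <rte_ethdev_driver.h>	/* driver-facing API exported above */

static int
dummy_pmd_probe(const char *name)
{
	struct rte_eth_dev *eth_dev;

	/* rte_eth_dev_allocate() is a driver-internal function declared in
	 * rte_ethdev_driver.h; applications keep using rte_ethdev.h only. */
	eth_dev = rte_eth_dev_allocate(name);
	if (eth_dev == NULL)
		return -ENOMEM;

	/* ... set eth_dev->dev_ops and the rx/tx burst functions here ... */
	return 0;
}
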
diff --git a/lib/librte_ether/ethdev_profile.c b/lib/librte_ether/ethdev_profile.c
index c9cb8420..0d1dcda3 100644
--- a/lib/librte_ether/ethdev_profile.c
+++ b/lib/librte_ether/ethdev_profile.c
@@ -1,34 +1,5 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2010-2017 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2017 Intel Corporation
*/
#include "ethdev_profile.h"
diff --git a/lib/librte_ether/ethdev_profile.h b/lib/librte_ether/ethdev_profile.h
index 697facff..e5ea3682 100644
--- a/lib/librte_ether/ethdev_profile.h
+++ b/lib/librte_ether/ethdev_profile.h
@@ -1,34 +1,5 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2010-2017 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2017 Intel Corporation
*/
#ifndef _RTE_ETHDEV_PROFILE_H_
diff --git a/lib/librte_ether/meson.build b/lib/librte_ether/meson.build
new file mode 100644
index 00000000..7fed8605
--- /dev/null
+++ b/lib/librte_ether/meson.build
@@ -0,0 +1,27 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2017 Intel Corporation
+
+name = 'ethdev'
+version = 8
+allow_experimental_apis = true
+sources = files('ethdev_profile.c',
+ 'rte_ethdev.c',
+ 'rte_flow.c',
+ 'rte_mtr.c',
+ 'rte_tm.c')
+
+headers = files('rte_ethdev.h',
+ 'rte_ethdev_driver.h',
+ 'rte_ethdev_core.h',
+ 'rte_ethdev_pci.h',
+ 'rte_ethdev_vdev.h',
+ 'rte_eth_ctrl.h',
+ 'rte_dev_info.h',
+ 'rte_flow.h',
+ 'rte_flow_driver.h',
+ 'rte_mtr.h',
+ 'rte_mtr_driver.h',
+ 'rte_tm.h',
+ 'rte_tm_driver.h')
+
+deps += ['net']
diff --git a/lib/librte_ether/rte_dev_info.h b/lib/librte_ether/rte_dev_info.h
index aab6d1a6..6b68584d 100644
--- a/lib/librte_ether/rte_dev_info.h
+++ b/lib/librte_ether/rte_dev_info.h
@@ -1,34 +1,5 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2015 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2015 Intel Corporation
*/
#ifndef _RTE_DEV_INFO_H_
diff --git a/lib/librte_ether/rte_eth_ctrl.h b/lib/librte_ether/rte_eth_ctrl.h
index 83869042..668f59ac 100644
--- a/lib/librte_ether/rte_eth_ctrl.h
+++ b/lib/librte_ether/rte_eth_ctrl.h
@@ -1,34 +1,5 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2015 Intel Corporation
*/
#ifndef _RTE_ETH_CTRL_H_
@@ -691,9 +662,9 @@ enum rte_fdir_mode {
RTE_FDIR_MODE_PERFECT_TUNNEL, /**< Enable FDIR filter mode - tunnel. */
};
-#define UINT32_BIT (CHAR_BIT * sizeof(uint32_t))
+#define UINT64_BIT (CHAR_BIT * sizeof(uint64_t))
#define RTE_FLOW_MASK_ARRAY_SIZE \
- (RTE_ALIGN(RTE_ETH_FLOW_MAX, UINT32_BIT)/UINT32_BIT)
+ (RTE_ALIGN(RTE_ETH_FLOW_MAX, UINT64_BIT)/UINT64_BIT)
/**
* A structure used to get the information of flow director filter.
@@ -710,7 +681,7 @@ struct rte_eth_fdir_info {
uint32_t guarant_spc; /**< Guaranteed spaces.*/
uint32_t best_spc; /**< Best effort spaces.*/
/** Bit mask for every supported flow type. */
- uint32_t flow_types_mask[RTE_FLOW_MASK_ARRAY_SIZE];
+ uint64_t flow_types_mask[RTE_FLOW_MASK_ARRAY_SIZE];
uint32_t max_flexpayload; /**< Total flex payload in bytes. */
/** Flexible payload unit in bytes. Size and alignments of all flex
payload segments should be multiplies of this value. */
@@ -803,7 +774,7 @@ enum rte_eth_hash_function {
};
#define RTE_SYM_HASH_MASK_ARRAY_SIZE \
- (RTE_ALIGN(RTE_ETH_FLOW_MAX, UINT32_BIT)/UINT32_BIT)
+ (RTE_ALIGN(RTE_ETH_FLOW_MAX, UINT64_BIT)/UINT64_BIT)
/**
* A structure used to set or get global hash function configurations which
* include symmetric hash enable per flow type and hash function type.
@@ -816,9 +787,9 @@ enum rte_eth_hash_function {
struct rte_eth_hash_global_conf {
enum rte_eth_hash_function hash_func; /**< Hash function type */
/** Bit mask for symmetric hash enable per flow type */
- uint32_t sym_hash_enable_mask[RTE_SYM_HASH_MASK_ARRAY_SIZE];
+ uint64_t sym_hash_enable_mask[RTE_SYM_HASH_MASK_ARRAY_SIZE];
/** Bit mask indicates if the corresponding bit is valid */
- uint32_t valid_bit_mask[RTE_SYM_HASH_MASK_ARRAY_SIZE];
+ uint64_t valid_bit_mask[RTE_SYM_HASH_MASK_ARRAY_SIZE];
};
/**
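
With the hunks above, the flow-director and symmetric-hash bit masks are built from 64-bit words instead of 32-bit ones, so callers index them with UINT64_BIT. A minimal sketch of testing one flow-type bit follows (the helper name is made up; struct rte_eth_fdir_info and UINT64_BIT are as defined in this file):

/* Illustrative sketch only -- not part of this patch. */
#include <stdint.h>

static inline int
fdir_flow_type_supported(const struct rte_eth_fdir_info *info,
			 uint16_t flow_type)
{
	/* flow_types_mask[] holds one bit per RTE_ETH_FLOW_* value,
	 * packed into uint64_t words as declared above. */
	return (info->flow_types_mask[flow_type / UINT64_BIT] &
		(UINT64_C(1) << (flow_type % UINT64_BIT))) != 0;
}
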
diff --git a/lib/librte_ether/rte_ethdev.c b/lib/librte_ether/rte_ethdev.c
index 318af286..0590f0c1 100644
--- a/lib/librte_ether/rte_ethdev.c
+++ b/lib/librte_ether/rte_ethdev.c
@@ -1,34 +1,5 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2010-2017 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2017 Intel Corporation
*/
#include <sys/types.h>
@@ -63,14 +34,15 @@
#include <rte_errno.h>
#include <rte_spinlock.h>
#include <rte_string_fns.h>
+#include <rte_compat.h>
#include "rte_ether.h"
#include "rte_ethdev.h"
+#include "rte_ethdev_driver.h"
#include "ethdev_profile.h"
static const char *MZ_RTE_ETH_DEV_DATA = "rte_eth_dev_data";
struct rte_eth_dev rte_eth_devices[RTE_MAX_ETHPORTS];
-static struct rte_eth_dev_data *rte_eth_dev_data;
static uint8_t eth_dev_last_created_port;
/* spinlock for eth device callbacks */
@@ -82,17 +54,28 @@ static rte_spinlock_t rte_eth_rx_cb_lock = RTE_SPINLOCK_INITIALIZER;
/* spinlock for add/remove tx callbacks */
static rte_spinlock_t rte_eth_tx_cb_lock = RTE_SPINLOCK_INITIALIZER;
+/* spinlock for shared data allocation */
+static rte_spinlock_t rte_eth_shared_data_lock = RTE_SPINLOCK_INITIALIZER;
+
/* store statistics names and its offset in stats structure */
struct rte_eth_xstats_name_off {
char name[RTE_ETH_XSTATS_NAME_SIZE];
unsigned offset;
};
+/* Shared memory between primary and secondary processes. */
+static struct {
+ uint64_t next_owner_id;
+ rte_spinlock_t ownership_lock;
+ struct rte_eth_dev_data data[RTE_MAX_ETHPORTS];
+} *rte_eth_dev_shared_data;
+
static const struct rte_eth_xstats_name_off rte_stats_strings[] = {
{"rx_good_packets", offsetof(struct rte_eth_stats, ipackets)},
{"tx_good_packets", offsetof(struct rte_eth_stats, opackets)},
{"rx_good_bytes", offsetof(struct rte_eth_stats, ibytes)},
{"tx_good_bytes", offsetof(struct rte_eth_stats, obytes)},
+ {"rx_missed_errors", offsetof(struct rte_eth_stats, imissed)},
{"rx_errors", offsetof(struct rte_eth_stats, ierrors)},
{"tx_errors", offsetof(struct rte_eth_stats, oerrors)},
{"rx_mbuf_allocation_errors", offsetof(struct rte_eth_stats,
@@ -117,6 +100,61 @@ static const struct rte_eth_xstats_name_off rte_txq_stats_strings[] = {
#define RTE_NB_TXQ_STATS (sizeof(rte_txq_stats_strings) / \
sizeof(rte_txq_stats_strings[0]))
+#define RTE_RX_OFFLOAD_BIT2STR(_name) \
+ { DEV_RX_OFFLOAD_##_name, #_name }
+
+static const struct {
+ uint64_t offload;
+ const char *name;
+} rte_rx_offload_names[] = {
+ RTE_RX_OFFLOAD_BIT2STR(VLAN_STRIP),
+ RTE_RX_OFFLOAD_BIT2STR(IPV4_CKSUM),
+ RTE_RX_OFFLOAD_BIT2STR(UDP_CKSUM),
+ RTE_RX_OFFLOAD_BIT2STR(TCP_CKSUM),
+ RTE_RX_OFFLOAD_BIT2STR(TCP_LRO),
+ RTE_RX_OFFLOAD_BIT2STR(QINQ_STRIP),
+ RTE_RX_OFFLOAD_BIT2STR(OUTER_IPV4_CKSUM),
+ RTE_RX_OFFLOAD_BIT2STR(MACSEC_STRIP),
+ RTE_RX_OFFLOAD_BIT2STR(HEADER_SPLIT),
+ RTE_RX_OFFLOAD_BIT2STR(VLAN_FILTER),
+ RTE_RX_OFFLOAD_BIT2STR(VLAN_EXTEND),
+ RTE_RX_OFFLOAD_BIT2STR(JUMBO_FRAME),
+ RTE_RX_OFFLOAD_BIT2STR(CRC_STRIP),
+ RTE_RX_OFFLOAD_BIT2STR(SCATTER),
+ RTE_RX_OFFLOAD_BIT2STR(TIMESTAMP),
+ RTE_RX_OFFLOAD_BIT2STR(SECURITY),
+};
+
+#undef RTE_RX_OFFLOAD_BIT2STR
+
+#define RTE_TX_OFFLOAD_BIT2STR(_name) \
+ { DEV_TX_OFFLOAD_##_name, #_name }
+
+static const struct {
+ uint64_t offload;
+ const char *name;
+} rte_tx_offload_names[] = {
+ RTE_TX_OFFLOAD_BIT2STR(VLAN_INSERT),
+ RTE_TX_OFFLOAD_BIT2STR(IPV4_CKSUM),
+ RTE_TX_OFFLOAD_BIT2STR(UDP_CKSUM),
+ RTE_TX_OFFLOAD_BIT2STR(TCP_CKSUM),
+ RTE_TX_OFFLOAD_BIT2STR(SCTP_CKSUM),
+ RTE_TX_OFFLOAD_BIT2STR(TCP_TSO),
+ RTE_TX_OFFLOAD_BIT2STR(UDP_TSO),
+ RTE_TX_OFFLOAD_BIT2STR(OUTER_IPV4_CKSUM),
+ RTE_TX_OFFLOAD_BIT2STR(QINQ_INSERT),
+ RTE_TX_OFFLOAD_BIT2STR(VXLAN_TNL_TSO),
+ RTE_TX_OFFLOAD_BIT2STR(GRE_TNL_TSO),
+ RTE_TX_OFFLOAD_BIT2STR(IPIP_TNL_TSO),
+ RTE_TX_OFFLOAD_BIT2STR(GENEVE_TNL_TSO),
+ RTE_TX_OFFLOAD_BIT2STR(MACSEC_INSERT),
+ RTE_TX_OFFLOAD_BIT2STR(MT_LOCKFREE),
+ RTE_TX_OFFLOAD_BIT2STR(MULTI_SEGS),
+ RTE_TX_OFFLOAD_BIT2STR(MBUF_FAST_FREE),
+ RTE_TX_OFFLOAD_BIT2STR(SECURITY),
+};
+
+#undef RTE_TX_OFFLOAD_BIT2STR
/**
* The user application callback description.
@@ -142,7 +180,8 @@ uint16_t
rte_eth_find_next(uint16_t port_id)
{
while (port_id < RTE_MAX_ETHPORTS &&
- rte_eth_devices[port_id].state != RTE_ETH_DEV_ATTACHED)
+ rte_eth_devices[port_id].state != RTE_ETH_DEV_ATTACHED &&
+ rte_eth_devices[port_id].state != RTE_ETH_DEV_REMOVED)
port_id++;
if (port_id >= RTE_MAX_ETHPORTS)
@@ -152,24 +191,35 @@ rte_eth_find_next(uint16_t port_id)
}
static void
-rte_eth_dev_data_alloc(void)
+rte_eth_dev_shared_data_prepare(void)
{
const unsigned flags = 0;
const struct rte_memzone *mz;
- if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
- mz = rte_memzone_reserve(MZ_RTE_ETH_DEV_DATA,
- RTE_MAX_ETHPORTS * sizeof(*rte_eth_dev_data),
- rte_socket_id(), flags);
- } else
- mz = rte_memzone_lookup(MZ_RTE_ETH_DEV_DATA);
- if (mz == NULL)
- rte_panic("Cannot allocate memzone for ethernet port data\n");
+ rte_spinlock_lock(&rte_eth_shared_data_lock);
+
+ if (rte_eth_dev_shared_data == NULL) {
+ if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
+ /* Allocate port data and ownership shared memory. */
+ mz = rte_memzone_reserve(MZ_RTE_ETH_DEV_DATA,
+ sizeof(*rte_eth_dev_shared_data),
+ rte_socket_id(), flags);
+ } else
+ mz = rte_memzone_lookup(MZ_RTE_ETH_DEV_DATA);
+ if (mz == NULL)
+ rte_panic("Cannot allocate ethdev shared data\n");
+
+ rte_eth_dev_shared_data = mz->addr;
+ if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
+ rte_eth_dev_shared_data->next_owner_id =
+ RTE_ETH_DEV_NO_OWNER + 1;
+ rte_spinlock_init(&rte_eth_dev_shared_data->ownership_lock);
+ memset(rte_eth_dev_shared_data->data, 0,
+ sizeof(rte_eth_dev_shared_data->data));
+ }
+ }
- rte_eth_dev_data = mz->addr;
- if (rte_eal_process_type() == RTE_PROC_PRIMARY)
- memset(rte_eth_dev_data, 0,
- RTE_MAX_ETHPORTS * sizeof(*rte_eth_dev_data));
+ rte_spinlock_unlock(&rte_eth_shared_data_lock);
}
struct rte_eth_dev *
@@ -191,8 +241,12 @@ rte_eth_dev_find_free_port(void)
unsigned i;
for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
- if (rte_eth_devices[i].state == RTE_ETH_DEV_UNUSED)
+ /* Using shared name field to find a free port. */
+ if (rte_eth_dev_shared_data->data[i].name[0] == '\0') {
+ RTE_ASSERT(rte_eth_devices[i].state ==
+ RTE_ETH_DEV_UNUSED);
return i;
+ }
}
return RTE_MAX_ETHPORTS;
}
@@ -202,9 +256,8 @@ eth_dev_get(uint16_t port_id)
{
struct rte_eth_dev *eth_dev = &rte_eth_devices[port_id];
- eth_dev->data = &rte_eth_dev_data[port_id];
+ eth_dev->data = &rte_eth_dev_shared_data->data[port_id];
eth_dev->state = RTE_ETH_DEV_ATTACHED;
- TAILQ_INIT(&(eth_dev->link_intr_cbs));
eth_dev_last_created_port = port_id;
@@ -215,29 +268,36 @@ struct rte_eth_dev *
rte_eth_dev_allocate(const char *name)
{
uint16_t port_id;
- struct rte_eth_dev *eth_dev;
+ struct rte_eth_dev *eth_dev = NULL;
+
+ rte_eth_dev_shared_data_prepare();
+
+ /* Synchronize port creation between primary and secondary threads. */
+ rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock);
port_id = rte_eth_dev_find_free_port();
if (port_id == RTE_MAX_ETHPORTS) {
- RTE_PMD_DEBUG_TRACE("Reached maximum number of Ethernet ports\n");
- return NULL;
+ RTE_LOG(ERR, EAL, "Reached maximum number of Ethernet ports\n");
+ goto unlock;
}
- if (rte_eth_dev_data == NULL)
- rte_eth_dev_data_alloc();
-
if (rte_eth_dev_allocated(name) != NULL) {
- RTE_PMD_DEBUG_TRACE("Ethernet Device with name %s already allocated!\n",
+ RTE_LOG(ERR, EAL, "Ethernet Device with name %s already allocated!\n",
name);
- return NULL;
+ goto unlock;
}
- memset(&rte_eth_dev_data[port_id], 0, sizeof(struct rte_eth_dev_data));
eth_dev = eth_dev_get(port_id);
snprintf(eth_dev->data->name, sizeof(eth_dev->data->name), "%s", name);
eth_dev->data->port_id = port_id;
eth_dev->data->mtu = ETHER_MTU;
+unlock:
+ rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock);
+
+ if (eth_dev != NULL)
+ _rte_eth_dev_callback_process(eth_dev, RTE_ETH_EVENT_NEW, NULL);
+
return eth_dev;
}
@@ -250,25 +310,27 @@ struct rte_eth_dev *
rte_eth_dev_attach_secondary(const char *name)
{
uint16_t i;
- struct rte_eth_dev *eth_dev;
+ struct rte_eth_dev *eth_dev = NULL;
+
+ rte_eth_dev_shared_data_prepare();
- if (rte_eth_dev_data == NULL)
- rte_eth_dev_data_alloc();
+ /* Synchronize port attachment to primary port creation and release. */
+ rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock);
for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
- if (strcmp(rte_eth_dev_data[i].name, name) == 0)
+ if (strcmp(rte_eth_dev_shared_data->data[i].name, name) == 0)
break;
}
if (i == RTE_MAX_ETHPORTS) {
RTE_PMD_DEBUG_TRACE(
"device %s is not driven by the primary process\n",
name);
- return NULL;
+ } else {
+ eth_dev = eth_dev_get(i);
+ RTE_ASSERT(eth_dev->data->port_id == i);
}
- eth_dev = eth_dev_get(i);
- RTE_ASSERT(eth_dev->data->port_id == i);
-
+ rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock);
return eth_dev;
}
@@ -278,7 +340,18 @@ rte_eth_dev_release_port(struct rte_eth_dev *eth_dev)
if (eth_dev == NULL)
return -EINVAL;
+ rte_eth_dev_shared_data_prepare();
+
+ rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock);
+
eth_dev->state = RTE_ETH_DEV_UNUSED;
+
+ memset(eth_dev->data, 0, sizeof(struct rte_eth_dev_data));
+
+ rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock);
+
+ _rte_eth_dev_callback_process(eth_dev, RTE_ETH_EVENT_DESTROY, NULL);
+
return 0;
}
@@ -286,13 +359,160 @@ int
rte_eth_dev_is_valid_port(uint16_t port_id)
{
if (port_id >= RTE_MAX_ETHPORTS ||
- (rte_eth_devices[port_id].state != RTE_ETH_DEV_ATTACHED &&
- rte_eth_devices[port_id].state != RTE_ETH_DEV_DEFERRED))
+ (rte_eth_devices[port_id].state == RTE_ETH_DEV_UNUSED))
return 0;
else
return 1;
}
+static int
+rte_eth_is_valid_owner_id(uint64_t owner_id)
+{
+ if (owner_id == RTE_ETH_DEV_NO_OWNER ||
+ rte_eth_dev_shared_data->next_owner_id <= owner_id) {
+ RTE_PMD_DEBUG_TRACE("Invalid owner_id=%016lX.\n", owner_id);
+ return 0;
+ }
+ return 1;
+}
+
+uint64_t __rte_experimental
+rte_eth_find_next_owned_by(uint16_t port_id, const uint64_t owner_id)
+{
+ while (port_id < RTE_MAX_ETHPORTS &&
+ ((rte_eth_devices[port_id].state != RTE_ETH_DEV_ATTACHED &&
+ rte_eth_devices[port_id].state != RTE_ETH_DEV_REMOVED) ||
+ rte_eth_devices[port_id].data->owner.id != owner_id))
+ port_id++;
+
+ if (port_id >= RTE_MAX_ETHPORTS)
+ return RTE_MAX_ETHPORTS;
+
+ return port_id;
+}
+
+int __rte_experimental
+rte_eth_dev_owner_new(uint64_t *owner_id)
+{
+ rte_eth_dev_shared_data_prepare();
+
+ rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock);
+
+ *owner_id = rte_eth_dev_shared_data->next_owner_id++;
+
+ rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock);
+ return 0;
+}
+
+static int
+_rte_eth_dev_owner_set(const uint16_t port_id, const uint64_t old_owner_id,
+ const struct rte_eth_dev_owner *new_owner)
+{
+ struct rte_eth_dev_owner *port_owner;
+ int sret;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
+
+ if (!rte_eth_is_valid_owner_id(new_owner->id) &&
+ !rte_eth_is_valid_owner_id(old_owner_id))
+ return -EINVAL;
+
+ port_owner = &rte_eth_devices[port_id].data->owner;
+ if (port_owner->id != old_owner_id) {
+ RTE_PMD_DEBUG_TRACE("Cannot set owner to port %d already owned"
+ " by %s_%016lX.\n", port_id,
+ port_owner->name, port_owner->id);
+ return -EPERM;
+ }
+
+ sret = snprintf(port_owner->name, RTE_ETH_MAX_OWNER_NAME_LEN, "%s",
+ new_owner->name);
+ if (sret < 0 || sret >= RTE_ETH_MAX_OWNER_NAME_LEN)
+ RTE_PMD_DEBUG_TRACE("Port %d owner name was truncated.\n",
+ port_id);
+
+ port_owner->id = new_owner->id;
+
+ RTE_PMD_DEBUG_TRACE("Port %d owner is %s_%016lX.\n", port_id,
+ new_owner->name, new_owner->id);
+
+ return 0;
+}
+
+int __rte_experimental
+rte_eth_dev_owner_set(const uint16_t port_id,
+ const struct rte_eth_dev_owner *owner)
+{
+ int ret;
+
+ rte_eth_dev_shared_data_prepare();
+
+ rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock);
+
+ ret = _rte_eth_dev_owner_set(port_id, RTE_ETH_DEV_NO_OWNER, owner);
+
+ rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock);
+ return ret;
+}
+
+int __rte_experimental
+rte_eth_dev_owner_unset(const uint16_t port_id, const uint64_t owner_id)
+{
+ const struct rte_eth_dev_owner new_owner = (struct rte_eth_dev_owner)
+ {.id = RTE_ETH_DEV_NO_OWNER, .name = ""};
+ int ret;
+
+ rte_eth_dev_shared_data_prepare();
+
+ rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock);
+
+ ret = _rte_eth_dev_owner_set(port_id, owner_id, &new_owner);
+
+ rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock);
+ return ret;
+}
+
+void __rte_experimental
+rte_eth_dev_owner_delete(const uint64_t owner_id)
+{
+ uint16_t port_id;
+
+ rte_eth_dev_shared_data_prepare();
+
+ rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock);
+
+ if (rte_eth_is_valid_owner_id(owner_id)) {
+ RTE_ETH_FOREACH_DEV_OWNED_BY(port_id, owner_id)
+ memset(&rte_eth_devices[port_id].data->owner, 0,
+ sizeof(struct rte_eth_dev_owner));
+ RTE_PMD_DEBUG_TRACE("All port owners owned by %016X identifier"
+ " have removed.\n", owner_id);
+ }
+
+ rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock);
+}
+
+int __rte_experimental
+rte_eth_dev_owner_get(const uint16_t port_id, struct rte_eth_dev_owner *owner)
+{
+ int ret = 0;
+
+ rte_eth_dev_shared_data_prepare();
+
+ rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock);
+
+ if (!rte_eth_dev_is_valid_port(port_id)) {
+ RTE_PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
+ ret = -ENODEV;
+ } else {
+ rte_memcpy(owner, &rte_eth_devices[port_id].data->owner,
+ sizeof(*owner));
+ }
+
+ rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock);
+ return ret;
+}
+
int
rte_eth_dev_socket_id(uint16_t port_id)
{
@@ -335,7 +555,7 @@ rte_eth_dev_get_name_by_port(uint16_t port_id, char *name)
/* shouldn't check 'rte_eth_devices[i].data',
* because it might be overwritten by VDEV PMD */
- tmp = rte_eth_dev_data[port_id].name;
+ tmp = rte_eth_dev_shared_data->data[port_id].name;
strcpy(name, tmp);
return 0;
}
@@ -343,25 +563,35 @@ rte_eth_dev_get_name_by_port(uint16_t port_id, char *name)
int
rte_eth_dev_get_port_by_name(const char *name, uint16_t *port_id)
{
- int i;
+ uint32_t pid;
if (name == NULL) {
RTE_PMD_DEBUG_TRACE("Null pointer is specified\n");
return -EINVAL;
}
- RTE_ETH_FOREACH_DEV(i) {
- if (!strncmp(name,
- rte_eth_dev_data[i].name, strlen(name))) {
-
- *port_id = i;
-
+ for (pid = 0; pid < RTE_MAX_ETHPORTS; pid++) {
+ if (rte_eth_devices[pid].state != RTE_ETH_DEV_UNUSED &&
+ !strncmp(name, rte_eth_dev_shared_data->data[pid].name,
+ strlen(name))) {
+ *port_id = pid;
return 0;
}
}
+
return -ENODEV;
}
+static int
+eth_err(uint16_t port_id, int ret)
+{
+ if (ret == 0)
+ return 0;
+ if (rte_eth_dev_is_removed(port_id))
+ return -EIO;
+ return ret;
+}
+
/* attach the new device, then store port_id of the device */
int
rte_eth_dev_attach(const char *devargs, uint16_t *port_id)
@@ -437,7 +667,7 @@ rte_eth_dev_detach(uint16_t port_id, char *name)
if (ret < 0)
goto err;
- rte_eth_devices[port_id].state = RTE_ETH_DEV_UNUSED;
+ rte_eth_dev_release_port(&rte_eth_devices[port_id]);
return 0;
err:
@@ -516,7 +746,8 @@ rte_eth_dev_rx_queue_start(uint16_t port_id, uint16_t rx_queue_id)
return 0;
}
- return dev->dev_ops->rx_queue_start(dev, rx_queue_id);
+ return eth_err(port_id, dev->dev_ops->rx_queue_start(dev,
+ rx_queue_id));
}
@@ -542,7 +773,7 @@ rte_eth_dev_rx_queue_stop(uint16_t port_id, uint16_t rx_queue_id)
return 0;
}
- return dev->dev_ops->rx_queue_stop(dev, rx_queue_id);
+ return eth_err(port_id, dev->dev_ops->rx_queue_stop(dev, rx_queue_id));
}
@@ -568,7 +799,8 @@ rte_eth_dev_tx_queue_start(uint16_t port_id, uint16_t tx_queue_id)
return 0;
}
- return dev->dev_ops->tx_queue_start(dev, tx_queue_id);
+ return eth_err(port_id, dev->dev_ops->tx_queue_start(dev,
+ tx_queue_id));
}
@@ -594,7 +826,7 @@ rte_eth_dev_tx_queue_stop(uint16_t port_id, uint16_t tx_queue_id)
return 0;
}
- return dev->dev_ops->tx_queue_stop(dev, tx_queue_id);
+ return eth_err(port_id, dev->dev_ops->tx_queue_stop(dev, tx_queue_id));
}
@@ -770,6 +1002,38 @@ rte_eth_convert_rx_offloads(const uint64_t rx_offloads,
rxmode->security = 0;
}
+const char * __rte_experimental
+rte_eth_dev_rx_offload_name(uint64_t offload)
+{
+ const char *name = "UNKNOWN";
+ unsigned int i;
+
+ for (i = 0; i < RTE_DIM(rte_rx_offload_names); ++i) {
+ if (offload == rte_rx_offload_names[i].offload) {
+ name = rte_rx_offload_names[i].name;
+ break;
+ }
+ }
+
+ return name;
+}
+
+const char * __rte_experimental
+rte_eth_dev_tx_offload_name(uint64_t offload)
+{
+ const char *name = "UNKNOWN";
+ unsigned int i;
+
+ for (i = 0; i < RTE_DIM(rte_tx_offload_names); ++i) {
+ if (offload == rte_tx_offload_names[i].offload) {
+ name = rte_tx_offload_names[i].name;
+ break;
+ }
+ }
+
+ return name;
+}
+
int
rte_eth_dev_configure(uint16_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
const struct rte_eth_conf *dev_conf)
@@ -810,7 +1074,7 @@ rte_eth_dev_configure(uint16_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
* Convert between the offloads API to enable PMDs to support
* only one of them.
*/
- if ((dev_conf->rxmode.ignore_offload_bitfield == 0)) {
+ if (dev_conf->rxmode.ignore_offload_bitfield == 0) {
rte_eth_convert_rx_offload_bitfield(
&dev_conf->rxmode, &local_conf.rxmode.offloads);
} else {
@@ -912,7 +1176,7 @@ rte_eth_dev_configure(uint16_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
port_id, diag);
rte_eth_dev_rx_queue_config(dev, 0);
rte_eth_dev_tx_queue_config(dev, 0);
- return diag;
+ return eth_err(port_id, diag);
}
/* Initialize Rx profiling if enabled at compilation time. */
@@ -922,7 +1186,7 @@ rte_eth_dev_configure(uint16_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
port_id, diag);
rte_eth_dev_rx_queue_config(dev, 0);
rte_eth_dev_tx_queue_config(dev, 0);
- return diag;
+ return eth_err(port_id, diag);
}
return 0;
@@ -1022,7 +1286,7 @@ rte_eth_dev_start(uint16_t port_id)
if (diag == 0)
dev->data->dev_started = 1;
else
- return diag;
+ return eth_err(port_id, diag);
rte_eth_dev_config_restore(port_id);
@@ -1064,7 +1328,7 @@ rte_eth_dev_set_link_up(uint16_t port_id)
dev = &rte_eth_devices[port_id];
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_set_link_up, -ENOTSUP);
- return (*dev->dev_ops->dev_set_link_up)(dev);
+ return eth_err(port_id, (*dev->dev_ops->dev_set_link_up)(dev));
}
int
@@ -1077,7 +1341,7 @@ rte_eth_dev_set_link_down(uint16_t port_id)
dev = &rte_eth_devices[port_id];
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_set_link_down, -ENOTSUP);
- return (*dev->dev_ops->dev_set_link_down)(dev);
+ return eth_err(port_id, (*dev->dev_ops->dev_set_link_down)(dev));
}
void
@@ -1114,6 +1378,29 @@ rte_eth_dev_reset(uint16_t port_id)
rte_eth_dev_stop(port_id);
ret = dev->dev_ops->dev_reset(dev);
+ return eth_err(port_id, ret);
+}
+
+int __rte_experimental
+rte_eth_dev_is_removed(uint16_t port_id)
+{
+ struct rte_eth_dev *dev;
+ int ret;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, 0);
+
+ dev = &rte_eth_devices[port_id];
+
+ if (dev->state == RTE_ETH_DEV_REMOVED)
+ return 1;
+
+ RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->is_removed, 0);
+
+ ret = dev->dev_ops->is_removed(dev);
+ if (ret != 0)
+ /* Device is physically removed. */
+ dev->state = RTE_ETH_DEV_REMOVED;
+
return ret;
}
@@ -1216,7 +1503,7 @@ rte_eth_rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
dev->data->min_rx_buf_size = mbp_buf_size;
}
- return ret;
+ return eth_err(port_id, ret);
}
/**
@@ -1335,8 +1622,8 @@ rte_eth_tx_queue_setup(uint16_t port_id, uint16_t tx_queue_id,
&local_conf.offloads);
}
- return (*dev->dev_ops->tx_queue_setup)(dev, tx_queue_id, nb_tx_desc,
- socket_id, &local_conf);
+ return eth_err(port_id, (*dev->dev_ops->tx_queue_setup)(dev,
+ tx_queue_id, nb_tx_desc, socket_id, &local_conf));
}
void
@@ -1392,14 +1679,16 @@ int
rte_eth_tx_done_cleanup(uint16_t port_id, uint16_t queue_id, uint32_t free_cnt)
{
struct rte_eth_dev *dev = &rte_eth_devices[port_id];
+ int ret;
/* Validate Input Data. Bail if not valid or not supported. */
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_done_cleanup, -ENOTSUP);
/* Call driver to free pending mbufs. */
- return (*dev->dev_ops->tx_done_cleanup)(dev->data->tx_queues[queue_id],
- free_cnt);
+ ret = (*dev->dev_ops->tx_done_cleanup)(dev->data->tx_queues[queue_id],
+ free_cnt);
+ return eth_err(port_id, ret);
}
void
@@ -1536,7 +1825,7 @@ rte_eth_stats_get(uint16_t port_id, struct rte_eth_stats *stats)
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_get, -ENOTSUP);
stats->rx_nombuf = dev->data->rx_mbuf_alloc_failed;
- return (*dev->dev_ops->stats_get)(dev, stats);
+ return eth_err(port_id, (*dev->dev_ops->stats_get)(dev, stats));
}
int
@@ -1582,12 +1871,12 @@ get_xstats_count(uint16_t port_id)
count = (*dev->dev_ops->xstats_get_names_by_id)(dev, NULL,
NULL, 0);
if (count < 0)
- return count;
+ return eth_err(port_id, count);
}
if (dev->dev_ops->xstats_get_names != NULL) {
count = (*dev->dev_ops->xstats_get_names)(dev, NULL, 0);
if (count < 0)
- return count;
+ return eth_err(port_id, count);
} else
count = 0;
@@ -1641,6 +1930,45 @@ rte_eth_xstats_get_id_by_name(uint16_t port_id, const char *xstat_name,
return -EINVAL;
}
+/* retrieve basic stats names */
+static int
+rte_eth_basic_stats_get_names(struct rte_eth_dev *dev,
+ struct rte_eth_xstat_name *xstats_names)
+{
+ int cnt_used_entries = 0;
+ uint32_t idx, id_queue;
+ uint16_t num_q;
+
+ for (idx = 0; idx < RTE_NB_STATS; idx++) {
+ snprintf(xstats_names[cnt_used_entries].name,
+ sizeof(xstats_names[0].name),
+ "%s", rte_stats_strings[idx].name);
+ cnt_used_entries++;
+ }
+ num_q = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
+ for (id_queue = 0; id_queue < num_q; id_queue++) {
+ for (idx = 0; idx < RTE_NB_RXQ_STATS; idx++) {
+ snprintf(xstats_names[cnt_used_entries].name,
+ sizeof(xstats_names[0].name),
+ "rx_q%u%s",
+ id_queue, rte_rxq_stats_strings[idx].name);
+ cnt_used_entries++;
+ }
+
+ }
+ num_q = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
+ for (id_queue = 0; id_queue < num_q; id_queue++) {
+ for (idx = 0; idx < RTE_NB_TXQ_STATS; idx++) {
+ snprintf(xstats_names[cnt_used_entries].name,
+ sizeof(xstats_names[0].name),
+ "tx_q%u%s",
+ id_queue, rte_txq_stats_strings[idx].name);
+ cnt_used_entries++;
+ }
+ }
+ return cnt_used_entries;
+}
+
/* retrieve ethdev extended statistics names */
int
rte_eth_xstats_get_names_by_id(uint16_t port_id,
@@ -1649,7 +1977,9 @@ rte_eth_xstats_get_names_by_id(uint16_t port_id,
{
struct rte_eth_xstat_name *xstats_names_copy;
unsigned int no_basic_stat_requested = 1;
+ unsigned int no_ext_stat_requested = 1;
unsigned int expected_entries;
+ unsigned int basic_count;
struct rte_eth_dev *dev;
unsigned int i;
int ret;
@@ -1657,6 +1987,7 @@ rte_eth_xstats_get_names_by_id(uint16_t port_id,
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
dev = &rte_eth_devices[port_id];
+ basic_count = get_xstats_basic_count(dev);
ret = get_xstats_count(port_id);
if (ret < 0)
return ret;
@@ -1674,7 +2005,6 @@ rte_eth_xstats_get_names_by_id(uint16_t port_id,
return -EINVAL;
if (ids && dev->dev_ops->xstats_get_names_by_id != NULL && size > 0) {
- unsigned int basic_count = get_xstats_basic_count(dev);
uint64_t ids_copy[size];
for (i = 0; i < size; i++) {
@@ -1713,8 +2043,26 @@ rte_eth_xstats_get_names_by_id(uint16_t port_id,
return -ENOMEM;
}
+ if (ids) {
+ for (i = 0; i < size; i++) {
+ if (ids[i] >= basic_count) {
+ no_ext_stat_requested = 0;
+ break;
+ }
+ }
+ }
+
/* Fill xstats_names_copy structure */
- rte_eth_xstats_get_names(port_id, xstats_names_copy, expected_entries);
+ if (ids && no_ext_stat_requested) {
+ rte_eth_basic_stats_get_names(dev, xstats_names_copy);
+ } else {
+ ret = rte_eth_xstats_get_names(port_id, xstats_names_copy,
+ expected_entries);
+ if (ret < 0) {
+ free(xstats_names_copy);
+ return ret;
+ }
+ }
/* Filter stats */
for (i = 0; i < size; i++) {
@@ -1739,8 +2087,6 @@ rte_eth_xstats_get_names(uint16_t port_id,
int cnt_used_entries;
int cnt_expected_entries;
int cnt_driver_entries;
- uint32_t idx, id_queue;
- uint16_t num_q;
cnt_expected_entries = get_xstats_count(port_id);
if (xstats_names == NULL || cnt_expected_entries < 0 ||
@@ -1749,35 +2095,9 @@ rte_eth_xstats_get_names(uint16_t port_id,
/* port_id checked in get_xstats_count() */
dev = &rte_eth_devices[port_id];
- cnt_used_entries = 0;
- for (idx = 0; idx < RTE_NB_STATS; idx++) {
- snprintf(xstats_names[cnt_used_entries].name,
- sizeof(xstats_names[0].name),
- "%s", rte_stats_strings[idx].name);
- cnt_used_entries++;
- }
- num_q = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
- for (id_queue = 0; id_queue < num_q; id_queue++) {
- for (idx = 0; idx < RTE_NB_RXQ_STATS; idx++) {
- snprintf(xstats_names[cnt_used_entries].name,
- sizeof(xstats_names[0].name),
- "rx_q%u%s",
- id_queue, rte_rxq_stats_strings[idx].name);
- cnt_used_entries++;
- }
-
- }
- num_q = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
- for (id_queue = 0; id_queue < num_q; id_queue++) {
- for (idx = 0; idx < RTE_NB_TXQ_STATS; idx++) {
- snprintf(xstats_names[cnt_used_entries].name,
- sizeof(xstats_names[0].name),
- "tx_q%u%s",
- id_queue, rte_txq_stats_strings[idx].name);
- cnt_used_entries++;
- }
- }
+ cnt_used_entries = rte_eth_basic_stats_get_names(
+ dev, xstats_names);
if (dev->dev_ops->xstats_get_names != NULL) {
/* If there are any driver-specific xstats, append them
@@ -1788,29 +2108,87 @@ rte_eth_xstats_get_names(uint16_t port_id,
xstats_names + cnt_used_entries,
size - cnt_used_entries);
if (cnt_driver_entries < 0)
- return cnt_driver_entries;
+ return eth_err(port_id, cnt_driver_entries);
cnt_used_entries += cnt_driver_entries;
}
return cnt_used_entries;
}
+
+static int
+rte_eth_basic_stats_get(uint16_t port_id, struct rte_eth_xstat *xstats)
+{
+ struct rte_eth_dev *dev;
+ struct rte_eth_stats eth_stats;
+ unsigned int count = 0, i, q;
+ uint64_t val, *stats_ptr;
+ uint16_t nb_rxqs, nb_txqs;
+ int ret;
+
+ ret = rte_eth_stats_get(port_id, &eth_stats);
+ if (ret < 0)
+ return ret;
+
+ dev = &rte_eth_devices[port_id];
+
+ nb_rxqs = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
+ nb_txqs = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
+
+ /* global stats */
+ for (i = 0; i < RTE_NB_STATS; i++) {
+ stats_ptr = RTE_PTR_ADD(&eth_stats,
+ rte_stats_strings[i].offset);
+ val = *stats_ptr;
+ xstats[count++].value = val;
+ }
+
+ /* per-rxq stats */
+ for (q = 0; q < nb_rxqs; q++) {
+ for (i = 0; i < RTE_NB_RXQ_STATS; i++) {
+ stats_ptr = RTE_PTR_ADD(&eth_stats,
+ rte_rxq_stats_strings[i].offset +
+ q * sizeof(uint64_t));
+ val = *stats_ptr;
+ xstats[count++].value = val;
+ }
+ }
+
+ /* per-txq stats */
+ for (q = 0; q < nb_txqs; q++) {
+ for (i = 0; i < RTE_NB_TXQ_STATS; i++) {
+ stats_ptr = RTE_PTR_ADD(&eth_stats,
+ rte_txq_stats_strings[i].offset +
+ q * sizeof(uint64_t));
+ val = *stats_ptr;
+ xstats[count++].value = val;
+ }
+ }
+ return count;
+}
+
/* retrieve ethdev extended statistics */
int
rte_eth_xstats_get_by_id(uint16_t port_id, const uint64_t *ids,
uint64_t *values, unsigned int size)
{
unsigned int no_basic_stat_requested = 1;
+ unsigned int no_ext_stat_requested = 1;
unsigned int num_xstats_filled;
+ unsigned int basic_count;
uint16_t expected_entries;
struct rte_eth_dev *dev;
unsigned int i;
int ret;
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
- expected_entries = get_xstats_count(port_id);
+ ret = get_xstats_count(port_id);
+ if (ret < 0)
+ return ret;
+ expected_entries = (uint16_t)ret;
struct rte_eth_xstat xstats[expected_entries];
dev = &rte_eth_devices[port_id];
+ basic_count = get_xstats_basic_count(dev);
/* Return max number of stats if no ids given */
if (!ids) {
@@ -1845,8 +2223,21 @@ rte_eth_xstats_get_by_id(uint16_t port_id, const uint64_t *ids,
values, size);
}
+ if (ids) {
+ for (i = 0; i < size; i++) {
+ if (ids[i] >= basic_count) {
+ no_ext_stat_requested = 0;
+ break;
+ }
+ }
+ }
+
/* Fill the xstats structure */
- ret = rte_eth_xstats_get(port_id, xstats, expected_entries);
+ if (ids && no_ext_stat_requested)
+ ret = rte_eth_basic_stats_get(port_id, xstats);
+ else
+ ret = rte_eth_xstats_get(port_id, xstats, expected_entries);
+
if (ret < 0)
return ret;
num_xstats_filled = (unsigned int)ret;
@@ -1873,12 +2264,11 @@ int
rte_eth_xstats_get(uint16_t port_id, struct rte_eth_xstat *xstats,
unsigned int n)
{
- struct rte_eth_stats eth_stats;
struct rte_eth_dev *dev;
- unsigned int count = 0, i, q;
+ unsigned int count = 0, i;
signed int xcount = 0;
- uint64_t val, *stats_ptr;
uint16_t nb_rxqs, nb_txqs;
+ int ret;
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
@@ -1901,45 +2291,17 @@ rte_eth_xstats_get(uint16_t port_id, struct rte_eth_xstat *xstats,
(n > count) ? n - count : 0);
if (xcount < 0)
- return xcount;
+ return eth_err(port_id, xcount);
}
if (n < count + xcount || xstats == NULL)
return count + xcount;
/* now fill the xstats structure */
- count = 0;
- rte_eth_stats_get(port_id, &eth_stats);
-
- /* global stats */
- for (i = 0; i < RTE_NB_STATS; i++) {
- stats_ptr = RTE_PTR_ADD(&eth_stats,
- rte_stats_strings[i].offset);
- val = *stats_ptr;
- xstats[count++].value = val;
- }
-
- /* per-rxq stats */
- for (q = 0; q < nb_rxqs; q++) {
- for (i = 0; i < RTE_NB_RXQ_STATS; i++) {
- stats_ptr = RTE_PTR_ADD(&eth_stats,
- rte_rxq_stats_strings[i].offset +
- q * sizeof(uint64_t));
- val = *stats_ptr;
- xstats[count++].value = val;
- }
- }
-
- /* per-txq stats */
- for (q = 0; q < nb_txqs; q++) {
- for (i = 0; i < RTE_NB_TXQ_STATS; i++) {
- stats_ptr = RTE_PTR_ADD(&eth_stats,
- rte_txq_stats_strings[i].offset +
- q * sizeof(uint64_t));
- val = *stats_ptr;
- xstats[count++].value = val;
- }
- }
+ ret = rte_eth_basic_stats_get(port_id, xstats);
+ if (ret < 0)
+ return ret;
+ count = ret;
for (i = 0; i < count; i++)
xstats[i].id = i;
@@ -1989,8 +2351,8 @@ int
rte_eth_dev_set_tx_queue_stats_mapping(uint16_t port_id, uint16_t tx_queue_id,
uint8_t stat_idx)
{
- return set_queue_stats_mapping(port_id, tx_queue_id, stat_idx,
- STAT_QMAP_TX);
+ return eth_err(port_id, set_queue_stats_mapping(port_id, tx_queue_id,
+ stat_idx, STAT_QMAP_TX));
}
@@ -1998,8 +2360,8 @@ int
rte_eth_dev_set_rx_queue_stats_mapping(uint16_t port_id, uint16_t rx_queue_id,
uint8_t stat_idx)
{
- return set_queue_stats_mapping(port_id, rx_queue_id, stat_idx,
- STAT_QMAP_RX);
+ return eth_err(port_id, set_queue_stats_mapping(port_id, rx_queue_id,
+ stat_idx, STAT_QMAP_RX));
}
int
@@ -2011,7 +2373,8 @@ rte_eth_dev_fw_version_get(uint16_t port_id, char *fw_version, size_t fw_size)
dev = &rte_eth_devices[port_id];
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fw_version_get, -ENOTSUP);
- return (*dev->dev_ops->fw_version_get)(dev, fw_version, fw_size);
+ return eth_err(port_id, (*dev->dev_ops->fw_version_get)(dev,
+ fw_version, fw_size));
}
void
@@ -2101,7 +2464,7 @@ rte_eth_dev_set_mtu(uint16_t port_id, uint16_t mtu)
if (!ret)
dev->data->mtu = mtu;
- return ret;
+ return eth_err(port_id, ret);
}
int
@@ -2141,7 +2504,7 @@ rte_eth_dev_vlan_filter(uint16_t port_id, uint16_t vlan_id, int on)
vfc->ids[vidx] &= ~(UINT64_C(1) << vbit);
}
- return ret;
+ return eth_err(port_id, ret);
}
int
@@ -2174,7 +2537,8 @@ rte_eth_dev_set_vlan_ether_type(uint16_t port_id,
dev = &rte_eth_devices[port_id];
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_tpid_set, -ENOTSUP);
- return (*dev->dev_ops->vlan_tpid_set)(dev, vlan_type, tpid);
+ return eth_err(port_id, (*dev->dev_ops->vlan_tpid_set)(dev, vlan_type,
+ tpid));
}
int
@@ -2252,7 +2616,7 @@ rte_eth_dev_set_vlan_offload(uint16_t port_id, int offload_mask)
&dev->data->dev_conf.rxmode);
}
- return ret;
+ return eth_err(port_id, ret);
}
int
@@ -2287,9 +2651,8 @@ rte_eth_dev_set_vlan_pvid(uint16_t port_id, uint16_t pvid, int on)
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
dev = &rte_eth_devices[port_id];
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_pvid_set, -ENOTSUP);
- (*dev->dev_ops->vlan_pvid_set)(dev, pvid, on);
- return 0;
+ return eth_err(port_id, (*dev->dev_ops->vlan_pvid_set)(dev, pvid, on));
}
int
@@ -2301,7 +2664,7 @@ rte_eth_dev_flow_ctrl_get(uint16_t port_id, struct rte_eth_fc_conf *fc_conf)
dev = &rte_eth_devices[port_id];
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->flow_ctrl_get, -ENOTSUP);
memset(fc_conf, 0, sizeof(*fc_conf));
- return (*dev->dev_ops->flow_ctrl_get)(dev, fc_conf);
+ return eth_err(port_id, (*dev->dev_ops->flow_ctrl_get)(dev, fc_conf));
}
int
@@ -2317,7 +2680,7 @@ rte_eth_dev_flow_ctrl_set(uint16_t port_id, struct rte_eth_fc_conf *fc_conf)
dev = &rte_eth_devices[port_id];
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->flow_ctrl_set, -ENOTSUP);
- return (*dev->dev_ops->flow_ctrl_set)(dev, fc_conf);
+ return eth_err(port_id, (*dev->dev_ops->flow_ctrl_set)(dev, fc_conf));
}
int
@@ -2335,7 +2698,8 @@ rte_eth_dev_priority_flow_ctrl_set(uint16_t port_id,
dev = &rte_eth_devices[port_id];
/* High water, low water validation are device specific */
if (*dev->dev_ops->priority_flow_ctrl_set)
- return (*dev->dev_ops->priority_flow_ctrl_set)(dev, pfc_conf);
+ return eth_err(port_id, (*dev->dev_ops->priority_flow_ctrl_set)
+ (dev, pfc_conf));
return -ENOTSUP;
}
@@ -2410,7 +2774,8 @@ rte_eth_dev_rss_reta_update(uint16_t port_id,
return ret;
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->reta_update, -ENOTSUP);
- return (*dev->dev_ops->reta_update)(dev, reta_conf, reta_size);
+ return eth_err(port_id, (*dev->dev_ops->reta_update)(dev, reta_conf,
+ reta_size));
}
int
@@ -2430,7 +2795,8 @@ rte_eth_dev_rss_reta_query(uint16_t port_id,
dev = &rte_eth_devices[port_id];
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->reta_query, -ENOTSUP);
- return (*dev->dev_ops->reta_query)(dev, reta_conf, reta_size);
+ return eth_err(port_id, (*dev->dev_ops->reta_query)(dev, reta_conf,
+ reta_size));
}
int
@@ -2442,7 +2808,8 @@ rte_eth_dev_rss_hash_update(uint16_t port_id,
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
dev = &rte_eth_devices[port_id];
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rss_hash_update, -ENOTSUP);
- return (*dev->dev_ops->rss_hash_update)(dev, rss_conf);
+ return eth_err(port_id, (*dev->dev_ops->rss_hash_update)(dev,
+ rss_conf));
}
int
@@ -2454,7 +2821,8 @@ rte_eth_dev_rss_hash_conf_get(uint16_t port_id,
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
dev = &rte_eth_devices[port_id];
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rss_hash_conf_get, -ENOTSUP);
- return (*dev->dev_ops->rss_hash_conf_get)(dev, rss_conf);
+ return eth_err(port_id, (*dev->dev_ops->rss_hash_conf_get)(dev,
+ rss_conf));
}
int
@@ -2476,7 +2844,8 @@ rte_eth_dev_udp_tunnel_port_add(uint16_t port_id,
dev = &rte_eth_devices[port_id];
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->udp_tunnel_port_add, -ENOTSUP);
- return (*dev->dev_ops->udp_tunnel_port_add)(dev, udp_tunnel);
+ return eth_err(port_id, (*dev->dev_ops->udp_tunnel_port_add)(dev,
+ udp_tunnel));
}
int
@@ -2499,7 +2868,8 @@ rte_eth_dev_udp_tunnel_port_delete(uint16_t port_id,
}
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->udp_tunnel_port_del, -ENOTSUP);
- return (*dev->dev_ops->udp_tunnel_port_del)(dev, udp_tunnel);
+ return eth_err(port_id, (*dev->dev_ops->udp_tunnel_port_del)(dev,
+ udp_tunnel));
}
int
@@ -2510,7 +2880,7 @@ rte_eth_led_on(uint16_t port_id)
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
dev = &rte_eth_devices[port_id];
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_led_on, -ENOTSUP);
- return (*dev->dev_ops->dev_led_on)(dev);
+ return eth_err(port_id, (*dev->dev_ops->dev_led_on)(dev));
}
int
@@ -2521,7 +2891,7 @@ rte_eth_led_off(uint16_t port_id)
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
dev = &rte_eth_devices[port_id];
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_led_off, -ENOTSUP);
- return (*dev->dev_ops->dev_led_off)(dev);
+ return eth_err(port_id, (*dev->dev_ops->dev_led_off)(dev));
}
/*
@@ -2597,7 +2967,7 @@ rte_eth_dev_mac_addr_add(uint16_t port_id, struct ether_addr *addr,
dev->data->mac_pool_sel[index] |= (1ULL << pool);
}
- return ret;
+ return eth_err(port_id, ret);
}
int
@@ -2693,7 +3063,7 @@ rte_eth_dev_uc_hash_table_set(uint16_t port_id, struct ether_addr *addr,
index = get_hash_mac_addr_index(port_id, addr);
/* Check if it's already there, and do nothing */
- if ((index >= 0) && (on))
+ if ((index >= 0) && on)
return 0;
if (index < 0) {
@@ -2723,7 +3093,7 @@ rte_eth_dev_uc_hash_table_set(uint16_t port_id, struct ether_addr *addr,
&dev->data->hash_mac_addrs[index]);
}
- return ret;
+ return eth_err(port_id, ret);
}
int
@@ -2736,7 +3106,8 @@ rte_eth_dev_uc_all_hash_table_set(uint16_t port_id, uint8_t on)
dev = &rte_eth_devices[port_id];
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->uc_all_hash_table_set, -ENOTSUP);
- return (*dev->dev_ops->uc_all_hash_table_set)(dev, on);
+ return eth_err(port_id, (*dev->dev_ops->uc_all_hash_table_set)(dev,
+ on));
}
int rte_eth_set_queue_rate_limit(uint16_t port_id, uint16_t queue_idx,
@@ -2766,7 +3137,8 @@ int rte_eth_set_queue_rate_limit(uint16_t port_id, uint16_t queue_idx,
}
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_queue_rate_limit, -ENOTSUP);
- return (*dev->dev_ops->set_queue_rate_limit)(dev, queue_idx, tx_rate);
+ return eth_err(port_id, (*dev->dev_ops->set_queue_rate_limit)(dev,
+ queue_idx, tx_rate));
}
int
@@ -2804,7 +3176,8 @@ rte_eth_mirror_rule_set(uint16_t port_id,
dev = &rte_eth_devices[port_id];
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mirror_rule_set, -ENOTSUP);
- return (*dev->dev_ops->mirror_rule_set)(dev, mirror_conf, rule_id, on);
+ return eth_err(port_id, (*dev->dev_ops->mirror_rule_set)(dev,
+ mirror_conf, rule_id, on));
}
int
@@ -2817,7 +3190,16 @@ rte_eth_mirror_rule_reset(uint16_t port_id, uint8_t rule_id)
dev = &rte_eth_devices[port_id];
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mirror_rule_reset, -ENOTSUP);
- return (*dev->dev_ops->mirror_rule_reset)(dev, rule_id);
+ return eth_err(port_id, (*dev->dev_ops->mirror_rule_reset)(dev,
+ rule_id));
+}
+
+RTE_INIT(eth_dev_init_cb_lists)
+{
+ int i;
+
+ for (i = 0; i < RTE_MAX_ETHPORTS; i++)
+ TAILQ_INIT(&rte_eth_devices[i].link_intr_cbs);
}
int
@@ -2827,37 +3209,59 @@ rte_eth_dev_callback_register(uint16_t port_id,
{
struct rte_eth_dev *dev;
struct rte_eth_dev_callback *user_cb;
+ uint32_t next_port; /* size is 32-bit to prevent loop wrap-around */
+ uint16_t last_port;
if (!cb_fn)
return -EINVAL;
- RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
+ if (!rte_eth_dev_is_valid_port(port_id) && port_id != RTE_ETH_ALL) {
+ RTE_LOG(ERR, EAL, "Invalid port_id=%d\n", port_id);
+ return -EINVAL;
+ }
+
+ if (port_id == RTE_ETH_ALL) {
+ next_port = 0;
+ last_port = RTE_MAX_ETHPORTS - 1;
+ } else {
+ next_port = last_port = port_id;
+ }
- dev = &rte_eth_devices[port_id];
rte_spinlock_lock(&rte_eth_dev_cb_lock);
- TAILQ_FOREACH(user_cb, &(dev->link_intr_cbs), next) {
- if (user_cb->cb_fn == cb_fn &&
- user_cb->cb_arg == cb_arg &&
- user_cb->event == event) {
- break;
+ do {
+ dev = &rte_eth_devices[next_port];
+
+ TAILQ_FOREACH(user_cb, &(dev->link_intr_cbs), next) {
+ if (user_cb->cb_fn == cb_fn &&
+ user_cb->cb_arg == cb_arg &&
+ user_cb->event == event) {
+ break;
+ }
}
- }
- /* create a new callback. */
- if (user_cb == NULL) {
- user_cb = rte_zmalloc("INTR_USER_CALLBACK",
- sizeof(struct rte_eth_dev_callback), 0);
- if (user_cb != NULL) {
- user_cb->cb_fn = cb_fn;
- user_cb->cb_arg = cb_arg;
- user_cb->event = event;
- TAILQ_INSERT_TAIL(&(dev->link_intr_cbs), user_cb, next);
+ /* create a new callback. */
+ if (user_cb == NULL) {
+ user_cb = rte_zmalloc("INTR_USER_CALLBACK",
+ sizeof(struct rte_eth_dev_callback), 0);
+ if (user_cb != NULL) {
+ user_cb->cb_fn = cb_fn;
+ user_cb->cb_arg = cb_arg;
+ user_cb->event = event;
+ TAILQ_INSERT_TAIL(&(dev->link_intr_cbs),
+ user_cb, next);
+ } else {
+ rte_spinlock_unlock(&rte_eth_dev_cb_lock);
+ rte_eth_dev_callback_unregister(port_id, event,
+ cb_fn, cb_arg);
+ return -ENOMEM;
+ }
+
}
- }
+ } while (++next_port <= last_port);
rte_spinlock_unlock(&rte_eth_dev_cb_lock);
- return (user_cb == NULL) ? -ENOMEM : 0;
+ return 0;
}
int
@@ -2868,36 +3272,50 @@ rte_eth_dev_callback_unregister(uint16_t port_id,
int ret;
struct rte_eth_dev *dev;
struct rte_eth_dev_callback *cb, *next;
+ uint32_t next_port; /* size is 32-bit to prevent loop wrap-around */
+ uint16_t last_port;
if (!cb_fn)
return -EINVAL;
- RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
+ if (!rte_eth_dev_is_valid_port(port_id) && port_id != RTE_ETH_ALL) {
+ RTE_LOG(ERR, EAL, "Invalid port_id=%d\n", port_id);
+ return -EINVAL;
+ }
+
+ if (port_id == RTE_ETH_ALL) {
+ next_port = 0;
+ last_port = RTE_MAX_ETHPORTS - 1;
+ } else {
+ next_port = last_port = port_id;
+ }
- dev = &rte_eth_devices[port_id];
rte_spinlock_lock(&rte_eth_dev_cb_lock);
- ret = 0;
- for (cb = TAILQ_FIRST(&dev->link_intr_cbs); cb != NULL; cb = next) {
+ do {
+ dev = &rte_eth_devices[next_port];
+ ret = 0;
+ for (cb = TAILQ_FIRST(&dev->link_intr_cbs); cb != NULL;
+ cb = next) {
- next = TAILQ_NEXT(cb, next);
+ next = TAILQ_NEXT(cb, next);
- if (cb->cb_fn != cb_fn || cb->event != event ||
- (cb->cb_arg != (void *)-1 &&
- cb->cb_arg != cb_arg))
- continue;
+ if (cb->cb_fn != cb_fn || cb->event != event ||
+ (cb->cb_arg != (void *)-1 && cb->cb_arg != cb_arg))
+ continue;
- /*
- * if this callback is not executing right now,
- * then remove it.
- */
- if (cb->active == 0) {
- TAILQ_REMOVE(&(dev->link_intr_cbs), cb, next);
- rte_free(cb);
- } else {
- ret = -EAGAIN;
+ /*
+ * if this callback is not executing right now,
+ * then remove it.
+ */
+ if (cb->active == 0) {
+ TAILQ_REMOVE(&(dev->link_intr_cbs), cb, next);
+ rte_free(cb);
+ } else {
+ ret = -EAGAIN;
+ }
}
- }
+ } while (++next_port <= last_port);
rte_spinlock_unlock(&rte_eth_dev_cb_lock);
return ret;
@@ -2905,7 +3323,7 @@ rte_eth_dev_callback_unregister(uint16_t port_id,
int
_rte_eth_dev_callback_process(struct rte_eth_dev *dev,
- enum rte_eth_event_type event, void *cb_arg, void *ret_param)
+ enum rte_eth_event_type event, void *ret_param)
{
struct rte_eth_dev_callback *cb_lst;
struct rte_eth_dev_callback dev_cb;
@@ -2917,8 +3335,6 @@ _rte_eth_dev_callback_process(struct rte_eth_dev *dev,
continue;
dev_cb = *cb_lst;
cb_lst->active = 1;
- if (cb_arg != NULL)
- dev_cb.cb_arg = cb_arg;
if (ret_param != NULL)
dev_cb.ret_param = ret_param;
@@ -3039,7 +3455,8 @@ rte_eth_dev_rx_intr_enable(uint16_t port_id,
dev = &rte_eth_devices[port_id];
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_intr_enable, -ENOTSUP);
- return (*dev->dev_ops->rx_queue_intr_enable)(dev, queue_id);
+ return eth_err(port_id, (*dev->dev_ops->rx_queue_intr_enable)(dev,
+ queue_id));
}
int
@@ -3053,7 +3470,8 @@ rte_eth_dev_rx_intr_disable(uint16_t port_id,
dev = &rte_eth_devices[port_id];
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_intr_disable, -ENOTSUP);
- return (*dev->dev_ops->rx_queue_intr_disable)(dev, queue_id);
+ return eth_err(port_id, (*dev->dev_ops->rx_queue_intr_disable)(dev,
+ queue_id));
}
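The eth_err() wrapper used in these hunks is defined earlier in rte_ethdev.c and is not shown here. As a rough sketch of the pattern, assuming it relies on rte_eth_dev_is_removed() (declared later in this patch), it turns a driver failure on a hot-unplugged device into -EIO; the real helper may differ in detail:

#include <rte_ethdev.h>

/* Sketch only: translate a driver error into -EIO when the port is gone. */
static int
eth_err(uint16_t port_id, int ret)
{
	if (ret == 0)
		return 0;
	if (rte_eth_dev_is_removed(port_id))
		return -EIO;
	return ret;
}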
@@ -3072,8 +3490,153 @@ rte_eth_dev_filter_supported(uint16_t port_id,
}
int
-rte_eth_dev_filter_ctrl(uint16_t port_id, enum rte_filter_type filter_type,
- enum rte_filter_op filter_op, void *arg)
+rte_eth_dev_filter_ctrl_v22(uint16_t port_id,
+ enum rte_filter_type filter_type,
+ enum rte_filter_op filter_op, void *arg);
+
+int
+rte_eth_dev_filter_ctrl_v22(uint16_t port_id,
+ enum rte_filter_type filter_type,
+ enum rte_filter_op filter_op, void *arg)
+{
+ struct rte_eth_fdir_info_v22 {
+ enum rte_fdir_mode mode;
+ struct rte_eth_fdir_masks mask;
+ struct rte_eth_fdir_flex_conf flex_conf;
+ uint32_t guarant_spc;
+ uint32_t best_spc;
+ uint32_t flow_types_mask[1];
+ uint32_t max_flexpayload;
+ uint32_t flex_payload_unit;
+ uint32_t max_flex_payload_segment_num;
+ uint16_t flex_payload_limit;
+ uint32_t flex_bitmask_unit;
+ uint32_t max_flex_bitmask_num;
+ };
+
+ struct rte_eth_hash_global_conf_v22 {
+ enum rte_eth_hash_function hash_func;
+ uint32_t sym_hash_enable_mask[1];
+ uint32_t valid_bit_mask[1];
+ };
+
+ struct rte_eth_hash_filter_info_v22 {
+ enum rte_eth_hash_filter_info_type info_type;
+ union {
+ uint8_t enable;
+ struct rte_eth_hash_global_conf_v22 global_conf;
+ struct rte_eth_input_set_conf input_set_conf;
+ } info;
+ };
+
+ struct rte_eth_dev *dev;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
+
+ dev = &rte_eth_devices[port_id];
+ RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->filter_ctrl, -ENOTSUP);
+ if (filter_op == RTE_ETH_FILTER_INFO) {
+ int retval;
+ struct rte_eth_fdir_info_v22 *fdir_info_v22;
+ struct rte_eth_fdir_info fdir_info;
+
+ fdir_info_v22 = (struct rte_eth_fdir_info_v22 *)arg;
+
+ retval = (*dev->dev_ops->filter_ctrl)(dev, filter_type,
+ filter_op, (void *)&fdir_info);
+ fdir_info_v22->mode = fdir_info.mode;
+ fdir_info_v22->mask = fdir_info.mask;
+ fdir_info_v22->flex_conf = fdir_info.flex_conf;
+ fdir_info_v22->guarant_spc = fdir_info.guarant_spc;
+ fdir_info_v22->best_spc = fdir_info.best_spc;
+ fdir_info_v22->flow_types_mask[0] =
+ (uint32_t)fdir_info.flow_types_mask[0];
+ fdir_info_v22->max_flexpayload = fdir_info.max_flexpayload;
+ fdir_info_v22->flex_payload_unit = fdir_info.flex_payload_unit;
+ fdir_info_v22->max_flex_payload_segment_num =
+ fdir_info.max_flex_payload_segment_num;
+ fdir_info_v22->flex_payload_limit =
+ fdir_info.flex_payload_limit;
+ fdir_info_v22->flex_bitmask_unit = fdir_info.flex_bitmask_unit;
+ fdir_info_v22->max_flex_bitmask_num =
+ fdir_info.max_flex_bitmask_num;
+ return retval;
+ } else if (filter_op == RTE_ETH_FILTER_GET) {
+ int retval;
+ struct rte_eth_hash_filter_info f_info;
+ struct rte_eth_hash_filter_info_v22 *f_info_v22 =
+ (struct rte_eth_hash_filter_info_v22 *)arg;
+
+ f_info.info_type = f_info_v22->info_type;
+ retval = (*dev->dev_ops->filter_ctrl)(dev, filter_type,
+ filter_op, (void *)&f_info);
+
+ switch (f_info_v22->info_type) {
+ case RTE_ETH_HASH_FILTER_SYM_HASH_ENA_PER_PORT:
+ f_info_v22->info.enable = f_info.info.enable;
+ break;
+ case RTE_ETH_HASH_FILTER_GLOBAL_CONFIG:
+ f_info_v22->info.global_conf.hash_func =
+ f_info.info.global_conf.hash_func;
+ f_info_v22->info.global_conf.sym_hash_enable_mask[0] =
+ (uint32_t)
+ f_info.info.global_conf.sym_hash_enable_mask[0];
+ f_info_v22->info.global_conf.valid_bit_mask[0] =
+ (uint32_t)
+ f_info.info.global_conf.valid_bit_mask[0];
+ break;
+ case RTE_ETH_HASH_FILTER_INPUT_SET_SELECT:
+ f_info_v22->info.input_set_conf =
+ f_info.info.input_set_conf;
+ break;
+ default:
+ break;
+ }
+ return retval;
+ } else if (filter_op == RTE_ETH_FILTER_SET) {
+ struct rte_eth_hash_filter_info f_info;
+ struct rte_eth_hash_filter_info_v22 *f_v22 =
+ (struct rte_eth_hash_filter_info_v22 *)arg;
+
+ f_info.info_type = f_v22->info_type;
+ switch (f_v22->info_type) {
+ case RTE_ETH_HASH_FILTER_SYM_HASH_ENA_PER_PORT:
+ f_info.info.enable = f_v22->info.enable;
+ break;
+ case RTE_ETH_HASH_FILTER_GLOBAL_CONFIG:
+ f_info.info.global_conf.hash_func =
+ f_v22->info.global_conf.hash_func;
+ f_info.info.global_conf.sym_hash_enable_mask[0] =
+ (uint32_t)
+ f_v22->info.global_conf.sym_hash_enable_mask[0];
+ f_info.info.global_conf.valid_bit_mask[0] =
+ (uint32_t)
+ f_v22->info.global_conf.valid_bit_mask[0];
+ break;
+ case RTE_ETH_HASH_FILTER_INPUT_SET_SELECT:
+ f_info.info.input_set_conf =
+ f_v22->info.input_set_conf;
+ break;
+ default:
+ break;
+ }
+ return (*dev->dev_ops->filter_ctrl)(dev, filter_type, filter_op,
+ (void *)&f_info);
+ } else
+ return (*dev->dev_ops->filter_ctrl)(dev, filter_type, filter_op,
+ arg);
+}
+VERSION_SYMBOL(rte_eth_dev_filter_ctrl, _v22, 2.2);
+
+int
+rte_eth_dev_filter_ctrl_v1802(uint16_t port_id,
+ enum rte_filter_type filter_type,
+ enum rte_filter_op filter_op, void *arg);
+
+int
+rte_eth_dev_filter_ctrl_v1802(uint16_t port_id,
+ enum rte_filter_type filter_type,
+ enum rte_filter_op filter_op, void *arg)
{
struct rte_eth_dev *dev;
@@ -3081,8 +3644,14 @@ rte_eth_dev_filter_ctrl(uint16_t port_id, enum rte_filter_type filter_type,
dev = &rte_eth_devices[port_id];
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->filter_ctrl, -ENOTSUP);
- return (*dev->dev_ops->filter_ctrl)(dev, filter_type, filter_op, arg);
+ return eth_err(port_id, (*dev->dev_ops->filter_ctrl)(dev, filter_type,
+ filter_op, arg));
}
+BIND_DEFAULT_SYMBOL(rte_eth_dev_filter_ctrl, _v1802, 18.02);
+MAP_STATIC_SYMBOL(int rte_eth_dev_filter_ctrl(uint16_t port_id,
+ enum rte_filter_type filter_type,
+ enum rte_filter_op filter_op, void *arg),
+ rte_eth_dev_filter_ctrl_v1802);
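The _v22/_v1802 pair above follows the usual DPDK symbol-versioning recipe from rte_compat.h. A stripped-down sketch of the same pattern applied to a hypothetical my_func() (the version-map bookkeeping that must accompany it is omitted here):

#include <rte_compat.h>

/* Hypothetical function kept ABI-compatible across releases. */
int my_func_v22(int arg);
int my_func_v1802(int arg);

int my_func_v22(int arg)   { return arg; }      /* old behaviour */
int my_func_v1802(int arg) { return arg + 1; }  /* new behaviour */

/* Binaries linked against DPDK 2.2 keep resolving my_func@DPDK_2.2. */
VERSION_SYMBOL(my_func, _v22, 2.2);
/* Newly built shared-library users bind the plain name to the 18.02 flavour. */
BIND_DEFAULT_SYMBOL(my_func, _v1802, 18.02);
/* Static builds alias the plain name directly to the default implementation. */
MAP_STATIC_SYMBOL(int my_func(int arg), my_func_v1802);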
void *
rte_eth_add_rx_callback(uint16_t port_id, uint16_t queue_id,
@@ -3331,7 +3900,8 @@ rte_eth_dev_set_mc_addr_list(uint16_t port_id,
dev = &rte_eth_devices[port_id];
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_mc_addr_list, -ENOTSUP);
- return dev->dev_ops->set_mc_addr_list(dev, mc_addr_set, nb_mc_addr);
+ return eth_err(port_id, dev->dev_ops->set_mc_addr_list(dev,
+ mc_addr_set, nb_mc_addr));
}
int
@@ -3343,7 +3913,7 @@ rte_eth_timesync_enable(uint16_t port_id)
dev = &rte_eth_devices[port_id];
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_enable, -ENOTSUP);
- return (*dev->dev_ops->timesync_enable)(dev);
+ return eth_err(port_id, (*dev->dev_ops->timesync_enable)(dev));
}
int
@@ -3355,7 +3925,7 @@ rte_eth_timesync_disable(uint16_t port_id)
dev = &rte_eth_devices[port_id];
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_disable, -ENOTSUP);
- return (*dev->dev_ops->timesync_disable)(dev);
+ return eth_err(port_id, (*dev->dev_ops->timesync_disable)(dev));
}
int
@@ -3368,7 +3938,8 @@ rte_eth_timesync_read_rx_timestamp(uint16_t port_id, struct timespec *timestamp,
dev = &rte_eth_devices[port_id];
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_read_rx_timestamp, -ENOTSUP);
- return (*dev->dev_ops->timesync_read_rx_timestamp)(dev, timestamp, flags);
+ return eth_err(port_id, (*dev->dev_ops->timesync_read_rx_timestamp)
+ (dev, timestamp, flags));
}
int
@@ -3381,7 +3952,8 @@ rte_eth_timesync_read_tx_timestamp(uint16_t port_id,
dev = &rte_eth_devices[port_id];
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_read_tx_timestamp, -ENOTSUP);
- return (*dev->dev_ops->timesync_read_tx_timestamp)(dev, timestamp);
+ return eth_err(port_id, (*dev->dev_ops->timesync_read_tx_timestamp)
+ (dev, timestamp));
}
int
@@ -3393,7 +3965,8 @@ rte_eth_timesync_adjust_time(uint16_t port_id, int64_t delta)
dev = &rte_eth_devices[port_id];
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_adjust_time, -ENOTSUP);
- return (*dev->dev_ops->timesync_adjust_time)(dev, delta);
+ return eth_err(port_id, (*dev->dev_ops->timesync_adjust_time)(dev,
+ delta));
}
int
@@ -3405,7 +3978,8 @@ rte_eth_timesync_read_time(uint16_t port_id, struct timespec *timestamp)
dev = &rte_eth_devices[port_id];
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_read_time, -ENOTSUP);
- return (*dev->dev_ops->timesync_read_time)(dev, timestamp);
+ return eth_err(port_id, (*dev->dev_ops->timesync_read_time)(dev,
+ timestamp));
}
int
@@ -3417,7 +3991,8 @@ rte_eth_timesync_write_time(uint16_t port_id, const struct timespec *timestamp)
dev = &rte_eth_devices[port_id];
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_write_time, -ENOTSUP);
- return (*dev->dev_ops->timesync_write_time)(dev, timestamp);
+ return eth_err(port_id, (*dev->dev_ops->timesync_write_time)(dev,
+ timestamp));
}
int
@@ -3429,7 +4004,7 @@ rte_eth_dev_get_reg_info(uint16_t port_id, struct rte_dev_reg_info *info)
dev = &rte_eth_devices[port_id];
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_reg, -ENOTSUP);
- return (*dev->dev_ops->get_reg)(dev, info);
+ return eth_err(port_id, (*dev->dev_ops->get_reg)(dev, info));
}
int
@@ -3441,7 +4016,7 @@ rte_eth_dev_get_eeprom_length(uint16_t port_id)
dev = &rte_eth_devices[port_id];
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_eeprom_length, -ENOTSUP);
- return (*dev->dev_ops->get_eeprom_length)(dev);
+ return eth_err(port_id, (*dev->dev_ops->get_eeprom_length)(dev));
}
int
@@ -3453,7 +4028,7 @@ rte_eth_dev_get_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info)
dev = &rte_eth_devices[port_id];
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_eeprom, -ENOTSUP);
- return (*dev->dev_ops->get_eeprom)(dev, info);
+ return eth_err(port_id, (*dev->dev_ops->get_eeprom)(dev, info));
}
int
@@ -3465,7 +4040,7 @@ rte_eth_dev_set_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info)
dev = &rte_eth_devices[port_id];
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_eeprom, -ENOTSUP);
- return (*dev->dev_ops->set_eeprom)(dev, info);
+ return eth_err(port_id, (*dev->dev_ops->set_eeprom)(dev, info));
}
int
@@ -3480,7 +4055,7 @@ rte_eth_dev_get_dcb_info(uint16_t port_id,
memset(dcb_info, 0, sizeof(struct rte_eth_dcb_info));
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_dcb_info, -ENOTSUP);
- return (*dev->dev_ops->get_dcb_info)(dev, dcb_info);
+ return eth_err(port_id, (*dev->dev_ops->get_dcb_info)(dev, dcb_info));
}
int
@@ -3503,7 +4078,8 @@ rte_eth_dev_l2_tunnel_eth_type_conf(uint16_t port_id,
dev = &rte_eth_devices[port_id];
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->l2_tunnel_eth_type_conf,
-ENOTSUP);
- return (*dev->dev_ops->l2_tunnel_eth_type_conf)(dev, l2_tunnel);
+ return eth_err(port_id, (*dev->dev_ops->l2_tunnel_eth_type_conf)(dev,
+ l2_tunnel));
}
int
@@ -3534,7 +4110,8 @@ rte_eth_dev_l2_tunnel_offload_set(uint16_t port_id,
dev = &rte_eth_devices[port_id];
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->l2_tunnel_offload_set,
-ENOTSUP);
- return (*dev->dev_ops->l2_tunnel_offload_set)(dev, l2_tunnel, mask, en);
+ return eth_err(port_id, (*dev->dev_ops->l2_tunnel_offload_set)(dev,
+ l2_tunnel, mask, en));
}
static void
diff --git a/lib/librte_ether/rte_ethdev.h b/lib/librte_ether/rte_ethdev.h
index 341c2d62..03615330 100644
--- a/lib/librte_ether/rte_ethdev.h
+++ b/lib/librte_ether/rte_ethdev.h
@@ -1,34 +1,5 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2010-2017 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2017 Intel Corporation
*/
#ifndef _RTE_ETHDEV_H_
@@ -46,10 +17,10 @@
* to get its MAC address, the speed and the status of its physical link,
* to receive and to transmit packets, and so on.
*
- * - The driver-oriented Ethernet API that exports a function allowing
- * an Ethernet Poll Mode Driver (PMD) to simultaneously register itself as
- * an Ethernet device driver and as a PCI driver for a set of matching PCI
- * [Ethernet] devices classes.
+ * - The driver-oriented Ethernet API that exports functions allowing
+ * an Ethernet Poll Mode Driver (PMD) to allocate an Ethernet device instance,
+ *   create memzones for HW rings, process registered callbacks, and so on.
+ * PMDs should include rte_ethdev_driver.h instead of this header.
*
* By default, all the functions of the Ethernet Device API exported by a PMD
* are lock-free functions which assume to not be invoked in parallel on
@@ -175,12 +146,14 @@ extern "C" {
/* Use this macro to check if LRO API is supported */
#define RTE_ETHDEV_HAS_LRO_SUPPORT
+#include <rte_compat.h>
#include <rte_log.h>
#include <rte_interrupts.h>
#include <rte_dev.h>
#include <rte_devargs.h>
#include <rte_errno.h>
#include <rte_common.h>
+#include <rte_config.h>
#include "rte_ether.h"
#include "rte_eth_ctrl.h"
@@ -262,17 +235,17 @@ __extension__
struct rte_eth_link {
uint32_t link_speed; /**< ETH_SPEED_NUM_ */
uint16_t link_duplex : 1; /**< ETH_LINK_[HALF/FULL]_DUPLEX */
- uint16_t link_autoneg : 1; /**< ETH_LINK_SPEED_[AUTONEG/FIXED] */
+ uint16_t link_autoneg : 1; /**< ETH_LINK_[AUTONEG/FIXED] */
uint16_t link_status : 1; /**< ETH_LINK_[DOWN/UP] */
} __attribute__((aligned(8))); /**< aligned for atomic64 read/write */
/* Utility constants */
-#define ETH_LINK_HALF_DUPLEX 0 /**< Half-duplex connection. */
-#define ETH_LINK_FULL_DUPLEX 1 /**< Full-duplex connection. */
-#define ETH_LINK_DOWN 0 /**< Link is down. */
-#define ETH_LINK_UP 1 /**< Link is up. */
-#define ETH_LINK_FIXED 0 /**< No autonegotiation. */
-#define ETH_LINK_AUTONEG 1 /**< Autonegotiated. */
+#define ETH_LINK_HALF_DUPLEX 0 /**< Half-duplex connection (see link_duplex). */
+#define ETH_LINK_FULL_DUPLEX 1 /**< Full-duplex connection (see link_duplex). */
+#define ETH_LINK_DOWN 0 /**< Link is down (see link_status). */
+#define ETH_LINK_UP 1 /**< Link is up (see link_status). */
+#define ETH_LINK_FIXED 0 /**< No autonegotiation (see link_autoneg). */
+#define ETH_LINK_AUTONEG 1 /**< Autonegotiated (see link_autoneg). */
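As a small illustration of how these bit-fields and constants are typically consumed (rte_eth_link_get_nowait() is the long-standing query API, not part of this hunk):

#include <stdio.h>
#include <rte_ethdev.h>

static void
print_link(uint16_t port_id)
{
	struct rte_eth_link link;

	rte_eth_link_get_nowait(port_id, &link);
	if (link.link_status == ETH_LINK_UP)
		printf("port %u: %u Mbps %s-duplex (%s)\n", port_id,
		       link.link_speed,
		       link.link_duplex == ETH_LINK_FULL_DUPLEX ?
				"full" : "half",
		       link.link_autoneg == ETH_LINK_AUTONEG ?
				"autoneg" : "fixed");
	else
		printf("port %u: link down\n", port_id);
}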
/**
* A structure used to configure the ring threshold registers of an RX/TX
@@ -482,7 +455,6 @@ struct rte_eth_rss_conf {
ETH_RSS_GENEVE | \
ETH_RSS_NVGRE)
-
/**< Mask of valid RSS hash protocols */
#define ETH_RSS_PROTO_MASK ( \
ETH_RSS_IPV4 | \
@@ -974,6 +946,11 @@ struct rte_eth_conf {
DEV_RX_OFFLOAD_VLAN_FILTER | \
DEV_RX_OFFLOAD_VLAN_EXTEND)
+/*
+ * If new Rx offload capabilities are defined, they also must be
+ * mentioned in rte_rx_offload_names in rte_ethdev.c file.
+ */
+
/**
* TX offload capabilities of a device.
*/
@@ -1004,6 +981,11 @@ struct rte_eth_conf {
*/
#define DEV_TX_OFFLOAD_SECURITY 0x00020000
+/*
+ * If new Tx offload capabilities are defined, they also must be
+ * mentioned in rte_tx_offload_names in rte_ethdev.c file.
+ */
+
struct rte_pci_device;
/**
@@ -1137,9 +1119,7 @@ struct rte_eth_dcb_info {
struct rte_eth_dev;
-struct rte_eth_dev_callback;
-/** @internal Structure to keep track of registered callbacks */
-TAILQ_HEAD(rte_eth_dev_cb_list, rte_eth_dev_callback);
+#define RTE_ETH_ALL RTE_MAX_ETHPORTS
/* Macros to check for valid port */
#define RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, retval) do { \
@@ -1169,485 +1149,13 @@ TAILQ_HEAD(rte_eth_dev_cb_list, rte_eth_dev_callback);
/**< l2 tunnel forwarding mask */
#define ETH_L2_TUNNEL_FORWARDING_MASK 0x00000008
-/*
- * Definitions of all functions exported by an Ethernet driver through the
- * the generic structure of type *eth_dev_ops* supplied in the *rte_eth_dev*
- * structure associated with an Ethernet device.
- */
-
-typedef int (*eth_dev_configure_t)(struct rte_eth_dev *dev);
-/**< @internal Ethernet device configuration. */
-
-typedef int (*eth_dev_start_t)(struct rte_eth_dev *dev);
-/**< @internal Function used to start a configured Ethernet device. */
-
-typedef void (*eth_dev_stop_t)(struct rte_eth_dev *dev);
-/**< @internal Function used to stop a configured Ethernet device. */
-
-typedef int (*eth_dev_set_link_up_t)(struct rte_eth_dev *dev);
-/**< @internal Function used to link up a configured Ethernet device. */
-
-typedef int (*eth_dev_set_link_down_t)(struct rte_eth_dev *dev);
-/**< @internal Function used to link down a configured Ethernet device. */
-
-typedef void (*eth_dev_close_t)(struct rte_eth_dev *dev);
-/**< @internal Function used to close a configured Ethernet device. */
-
-typedef int (*eth_dev_reset_t)(struct rte_eth_dev *dev);
-/** <@internal Function used to reset a configured Ethernet device. */
-
-typedef void (*eth_promiscuous_enable_t)(struct rte_eth_dev *dev);
-/**< @internal Function used to enable the RX promiscuous mode of an Ethernet device. */
-
-typedef void (*eth_promiscuous_disable_t)(struct rte_eth_dev *dev);
-/**< @internal Function used to disable the RX promiscuous mode of an Ethernet device. */
-
-typedef void (*eth_allmulticast_enable_t)(struct rte_eth_dev *dev);
-/**< @internal Enable the receipt of all multicast packets by an Ethernet device. */
-
-typedef void (*eth_allmulticast_disable_t)(struct rte_eth_dev *dev);
-/**< @internal Disable the receipt of all multicast packets by an Ethernet device. */
-
-typedef int (*eth_link_update_t)(struct rte_eth_dev *dev,
- int wait_to_complete);
-/**< @internal Get link speed, duplex mode and state (up/down) of an Ethernet device. */
-
-typedef int (*eth_stats_get_t)(struct rte_eth_dev *dev,
- struct rte_eth_stats *igb_stats);
-/**< @internal Get global I/O statistics of an Ethernet device. */
-
-typedef void (*eth_stats_reset_t)(struct rte_eth_dev *dev);
-/**< @internal Reset global I/O statistics of an Ethernet device to 0. */
-
-typedef int (*eth_xstats_get_t)(struct rte_eth_dev *dev,
- struct rte_eth_xstat *stats, unsigned n);
-/**< @internal Get extended stats of an Ethernet device. */
-
-typedef int (*eth_xstats_get_by_id_t)(struct rte_eth_dev *dev,
- const uint64_t *ids,
- uint64_t *values,
- unsigned int n);
-/**< @internal Get extended stats of an Ethernet device. */
-
-typedef void (*eth_xstats_reset_t)(struct rte_eth_dev *dev);
-/**< @internal Reset extended stats of an Ethernet device. */
-
-typedef int (*eth_xstats_get_names_t)(struct rte_eth_dev *dev,
- struct rte_eth_xstat_name *xstats_names, unsigned size);
-/**< @internal Get names of extended stats of an Ethernet device. */
-
-typedef int (*eth_xstats_get_names_by_id_t)(struct rte_eth_dev *dev,
- struct rte_eth_xstat_name *xstats_names, const uint64_t *ids,
- unsigned int size);
-/**< @internal Get names of extended stats of an Ethernet device. */
-
-typedef int (*eth_queue_stats_mapping_set_t)(struct rte_eth_dev *dev,
- uint16_t queue_id,
- uint8_t stat_idx,
- uint8_t is_rx);
-/**< @internal Set a queue statistics mapping for a tx/rx queue of an Ethernet device. */
-
-typedef void (*eth_dev_infos_get_t)(struct rte_eth_dev *dev,
- struct rte_eth_dev_info *dev_info);
-/**< @internal Get specific informations of an Ethernet device. */
-
-typedef const uint32_t *(*eth_dev_supported_ptypes_get_t)(struct rte_eth_dev *dev);
-/**< @internal Get supported ptypes of an Ethernet device. */
-
-typedef int (*eth_queue_start_t)(struct rte_eth_dev *dev,
- uint16_t queue_id);
-/**< @internal Start rx and tx of a queue of an Ethernet device. */
-
-typedef int (*eth_queue_stop_t)(struct rte_eth_dev *dev,
- uint16_t queue_id);
-/**< @internal Stop rx and tx of a queue of an Ethernet device. */
-
-typedef int (*eth_rx_queue_setup_t)(struct rte_eth_dev *dev,
- uint16_t rx_queue_id,
- uint16_t nb_rx_desc,
- unsigned int socket_id,
- const struct rte_eth_rxconf *rx_conf,
- struct rte_mempool *mb_pool);
-/**< @internal Set up a receive queue of an Ethernet device. */
-
-typedef int (*eth_tx_queue_setup_t)(struct rte_eth_dev *dev,
- uint16_t tx_queue_id,
- uint16_t nb_tx_desc,
- unsigned int socket_id,
- const struct rte_eth_txconf *tx_conf);
-/**< @internal Setup a transmit queue of an Ethernet device. */
-
-typedef int (*eth_rx_enable_intr_t)(struct rte_eth_dev *dev,
- uint16_t rx_queue_id);
-/**< @internal Enable interrupt of a receive queue of an Ethernet device. */
-
-typedef int (*eth_rx_disable_intr_t)(struct rte_eth_dev *dev,
- uint16_t rx_queue_id);
-/**< @internal Disable interrupt of a receive queue of an Ethernet device. */
-
-typedef void (*eth_queue_release_t)(void *queue);
-/**< @internal Release memory resources allocated by given RX/TX queue. */
-
-typedef uint32_t (*eth_rx_queue_count_t)(struct rte_eth_dev *dev,
- uint16_t rx_queue_id);
-/**< @internal Get number of used descriptors on a receive queue. */
-
-typedef int (*eth_rx_descriptor_done_t)(void *rxq, uint16_t offset);
-/**< @internal Check DD bit of specific RX descriptor */
-
-typedef int (*eth_rx_descriptor_status_t)(void *rxq, uint16_t offset);
-/**< @internal Check the status of a Rx descriptor */
-
-typedef int (*eth_tx_descriptor_status_t)(void *txq, uint16_t offset);
-/**< @internal Check the status of a Tx descriptor */
-
-typedef int (*eth_fw_version_get_t)(struct rte_eth_dev *dev,
- char *fw_version, size_t fw_size);
-/**< @internal Get firmware information of an Ethernet device. */
-
-typedef int (*eth_tx_done_cleanup_t)(void *txq, uint32_t free_cnt);
-/**< @internal Force mbufs to be from TX ring. */
-
-typedef void (*eth_rxq_info_get_t)(struct rte_eth_dev *dev,
- uint16_t rx_queue_id, struct rte_eth_rxq_info *qinfo);
-
-typedef void (*eth_txq_info_get_t)(struct rte_eth_dev *dev,
- uint16_t tx_queue_id, struct rte_eth_txq_info *qinfo);
-
-typedef int (*mtu_set_t)(struct rte_eth_dev *dev, uint16_t mtu);
-/**< @internal Set MTU. */
-
-typedef int (*vlan_filter_set_t)(struct rte_eth_dev *dev,
- uint16_t vlan_id,
- int on);
-/**< @internal filtering of a VLAN Tag Identifier by an Ethernet device. */
-
-typedef int (*vlan_tpid_set_t)(struct rte_eth_dev *dev,
- enum rte_vlan_type type, uint16_t tpid);
-/**< @internal set the outer/inner VLAN-TPID by an Ethernet device. */
-
-typedef int (*vlan_offload_set_t)(struct rte_eth_dev *dev, int mask);
-/**< @internal set VLAN offload function by an Ethernet device. */
-
-typedef int (*vlan_pvid_set_t)(struct rte_eth_dev *dev,
- uint16_t vlan_id,
- int on);
-/**< @internal set port based TX VLAN insertion by an Ethernet device. */
-
-typedef void (*vlan_strip_queue_set_t)(struct rte_eth_dev *dev,
- uint16_t rx_queue_id,
- int on);
-/**< @internal VLAN stripping enable/disable by an queue of Ethernet device. */
-
-typedef uint16_t (*eth_rx_burst_t)(void *rxq,
- struct rte_mbuf **rx_pkts,
- uint16_t nb_pkts);
-/**< @internal Retrieve input packets from a receive queue of an Ethernet device. */
-
-typedef uint16_t (*eth_tx_burst_t)(void *txq,
- struct rte_mbuf **tx_pkts,
- uint16_t nb_pkts);
-/**< @internal Send output packets on a transmit queue of an Ethernet device. */
-
-typedef uint16_t (*eth_tx_prep_t)(void *txq,
- struct rte_mbuf **tx_pkts,
- uint16_t nb_pkts);
-/**< @internal Prepare output packets on a transmit queue of an Ethernet device. */
-
-typedef int (*flow_ctrl_get_t)(struct rte_eth_dev *dev,
- struct rte_eth_fc_conf *fc_conf);
-/**< @internal Get current flow control parameter on an Ethernet device */
-
-typedef int (*flow_ctrl_set_t)(struct rte_eth_dev *dev,
- struct rte_eth_fc_conf *fc_conf);
-/**< @internal Setup flow control parameter on an Ethernet device */
-
-typedef int (*priority_flow_ctrl_set_t)(struct rte_eth_dev *dev,
- struct rte_eth_pfc_conf *pfc_conf);
-/**< @internal Setup priority flow control parameter on an Ethernet device */
-
-typedef int (*reta_update_t)(struct rte_eth_dev *dev,
- struct rte_eth_rss_reta_entry64 *reta_conf,
- uint16_t reta_size);
-/**< @internal Update RSS redirection table on an Ethernet device */
-
-typedef int (*reta_query_t)(struct rte_eth_dev *dev,
- struct rte_eth_rss_reta_entry64 *reta_conf,
- uint16_t reta_size);
-/**< @internal Query RSS redirection table on an Ethernet device */
-
-typedef int (*rss_hash_update_t)(struct rte_eth_dev *dev,
- struct rte_eth_rss_conf *rss_conf);
-/**< @internal Update RSS hash configuration of an Ethernet device */
-
-typedef int (*rss_hash_conf_get_t)(struct rte_eth_dev *dev,
- struct rte_eth_rss_conf *rss_conf);
-/**< @internal Get current RSS hash configuration of an Ethernet device */
-
-typedef int (*eth_dev_led_on_t)(struct rte_eth_dev *dev);
-/**< @internal Turn on SW controllable LED on an Ethernet device */
-
-typedef int (*eth_dev_led_off_t)(struct rte_eth_dev *dev);
-/**< @internal Turn off SW controllable LED on an Ethernet device */
-
-typedef void (*eth_mac_addr_remove_t)(struct rte_eth_dev *dev, uint32_t index);
-/**< @internal Remove MAC address from receive address register */
-
-typedef int (*eth_mac_addr_add_t)(struct rte_eth_dev *dev,
- struct ether_addr *mac_addr,
- uint32_t index,
- uint32_t vmdq);
-/**< @internal Set a MAC address into Receive Address Address Register */
-
-typedef void (*eth_mac_addr_set_t)(struct rte_eth_dev *dev,
- struct ether_addr *mac_addr);
-/**< @internal Set a MAC address into Receive Address Address Register */
-
-typedef int (*eth_uc_hash_table_set_t)(struct rte_eth_dev *dev,
- struct ether_addr *mac_addr,
- uint8_t on);
-/**< @internal Set a Unicast Hash bitmap */
-
-typedef int (*eth_uc_all_hash_table_set_t)(struct rte_eth_dev *dev,
- uint8_t on);
-/**< @internal Set all Unicast Hash bitmap */
-
-typedef int (*eth_set_queue_rate_limit_t)(struct rte_eth_dev *dev,
- uint16_t queue_idx,
- uint16_t tx_rate);
-/**< @internal Set queue TX rate */
-
-typedef int (*eth_mirror_rule_set_t)(struct rte_eth_dev *dev,
- struct rte_eth_mirror_conf *mirror_conf,
- uint8_t rule_id,
- uint8_t on);
-/**< @internal Add a traffic mirroring rule on an Ethernet device */
-
-typedef int (*eth_mirror_rule_reset_t)(struct rte_eth_dev *dev,
- uint8_t rule_id);
-/**< @internal Remove a traffic mirroring rule on an Ethernet device */
-
-typedef int (*eth_udp_tunnel_port_add_t)(struct rte_eth_dev *dev,
- struct rte_eth_udp_tunnel *tunnel_udp);
-/**< @internal Add tunneling UDP port */
-
-typedef int (*eth_udp_tunnel_port_del_t)(struct rte_eth_dev *dev,
- struct rte_eth_udp_tunnel *tunnel_udp);
-/**< @internal Delete tunneling UDP port */
-
-typedef int (*eth_set_mc_addr_list_t)(struct rte_eth_dev *dev,
- struct ether_addr *mc_addr_set,
- uint32_t nb_mc_addr);
-/**< @internal set the list of multicast addresses on an Ethernet device */
-
-typedef int (*eth_timesync_enable_t)(struct rte_eth_dev *dev);
-/**< @internal Function used to enable IEEE1588/802.1AS timestamping. */
-
-typedef int (*eth_timesync_disable_t)(struct rte_eth_dev *dev);
-/**< @internal Function used to disable IEEE1588/802.1AS timestamping. */
-
-typedef int (*eth_timesync_read_rx_timestamp_t)(struct rte_eth_dev *dev,
- struct timespec *timestamp,
- uint32_t flags);
-/**< @internal Function used to read an RX IEEE1588/802.1AS timestamp. */
-
-typedef int (*eth_timesync_read_tx_timestamp_t)(struct rte_eth_dev *dev,
- struct timespec *timestamp);
-/**< @internal Function used to read a TX IEEE1588/802.1AS timestamp. */
-
-typedef int (*eth_timesync_adjust_time)(struct rte_eth_dev *dev, int64_t);
-/**< @internal Function used to adjust the device clock */
-
-typedef int (*eth_timesync_read_time)(struct rte_eth_dev *dev,
- struct timespec *timestamp);
-/**< @internal Function used to get time from the device clock. */
-
-typedef int (*eth_timesync_write_time)(struct rte_eth_dev *dev,
- const struct timespec *timestamp);
-/**< @internal Function used to get time from the device clock */
-
-typedef int (*eth_get_reg_t)(struct rte_eth_dev *dev,
- struct rte_dev_reg_info *info);
-/**< @internal Retrieve registers */
-
-typedef int (*eth_get_eeprom_length_t)(struct rte_eth_dev *dev);
-/**< @internal Retrieve eeprom size */
-
-typedef int (*eth_get_eeprom_t)(struct rte_eth_dev *dev,
- struct rte_dev_eeprom_info *info);
-/**< @internal Retrieve eeprom data */
-
-typedef int (*eth_set_eeprom_t)(struct rte_eth_dev *dev,
- struct rte_dev_eeprom_info *info);
-/**< @internal Program eeprom data */
-
-typedef int (*eth_l2_tunnel_eth_type_conf_t)
- (struct rte_eth_dev *dev, struct rte_eth_l2_tunnel_conf *l2_tunnel);
-/**< @internal config l2 tunnel ether type */
-
-typedef int (*eth_l2_tunnel_offload_set_t)
- (struct rte_eth_dev *dev,
- struct rte_eth_l2_tunnel_conf *l2_tunnel,
- uint32_t mask,
- uint8_t en);
-/**< @internal enable/disable the l2 tunnel offload functions */
-
-
-typedef int (*eth_filter_ctrl_t)(struct rte_eth_dev *dev,
- enum rte_filter_type filter_type,
- enum rte_filter_op filter_op,
- void *arg);
-/**< @internal Take operations to assigned filter type on an Ethernet device */
-
-typedef int (*eth_tm_ops_get_t)(struct rte_eth_dev *dev, void *ops);
-/**< @internal Get Traffic Management (TM) operations on an Ethernet device */
-
-typedef int (*eth_mtr_ops_get_t)(struct rte_eth_dev *dev, void *ops);
-/**< @internal Get Trafffic Metering and Policing (MTR) operations */
-
-typedef int (*eth_get_dcb_info)(struct rte_eth_dev *dev,
- struct rte_eth_dcb_info *dcb_info);
-/**< @internal Get dcb information on an Ethernet device */
-
-typedef int (*eth_pool_ops_supported_t)(struct rte_eth_dev *dev,
- const char *pool);
-/**< @internal Test if a port supports specific mempool ops */
-
-/**
- * @internal A structure containing the functions exported by an Ethernet driver.
- */
-struct eth_dev_ops {
- eth_dev_configure_t dev_configure; /**< Configure device. */
- eth_dev_start_t dev_start; /**< Start device. */
- eth_dev_stop_t dev_stop; /**< Stop device. */
- eth_dev_set_link_up_t dev_set_link_up; /**< Device link up. */
- eth_dev_set_link_down_t dev_set_link_down; /**< Device link down. */
- eth_dev_close_t dev_close; /**< Close device. */
- eth_dev_reset_t dev_reset; /**< Reset device. */
- eth_link_update_t link_update; /**< Get device link state. */
-
- eth_promiscuous_enable_t promiscuous_enable; /**< Promiscuous ON. */
- eth_promiscuous_disable_t promiscuous_disable;/**< Promiscuous OFF. */
- eth_allmulticast_enable_t allmulticast_enable;/**< RX multicast ON. */
- eth_allmulticast_disable_t allmulticast_disable;/**< RX multicast OFF. */
- eth_mac_addr_remove_t mac_addr_remove; /**< Remove MAC address. */
- eth_mac_addr_add_t mac_addr_add; /**< Add a MAC address. */
- eth_mac_addr_set_t mac_addr_set; /**< Set a MAC address. */
- eth_set_mc_addr_list_t set_mc_addr_list; /**< set list of mcast addrs. */
- mtu_set_t mtu_set; /**< Set MTU. */
-
- eth_stats_get_t stats_get; /**< Get generic device statistics. */
- eth_stats_reset_t stats_reset; /**< Reset generic device statistics. */
- eth_xstats_get_t xstats_get; /**< Get extended device statistics. */
- eth_xstats_reset_t xstats_reset; /**< Reset extended device statistics. */
- eth_xstats_get_names_t xstats_get_names;
- /**< Get names of extended statistics. */
- eth_queue_stats_mapping_set_t queue_stats_mapping_set;
- /**< Configure per queue stat counter mapping. */
-
- eth_dev_infos_get_t dev_infos_get; /**< Get device info. */
- eth_rxq_info_get_t rxq_info_get; /**< retrieve RX queue information. */
- eth_txq_info_get_t txq_info_get; /**< retrieve TX queue information. */
- eth_fw_version_get_t fw_version_get; /**< Get firmware version. */
- eth_dev_supported_ptypes_get_t dev_supported_ptypes_get;
- /**< Get packet types supported and identified by device. */
-
- vlan_filter_set_t vlan_filter_set; /**< Filter VLAN Setup. */
- vlan_tpid_set_t vlan_tpid_set; /**< Outer/Inner VLAN TPID Setup. */
- vlan_strip_queue_set_t vlan_strip_queue_set; /**< VLAN Stripping on queue. */
- vlan_offload_set_t vlan_offload_set; /**< Set VLAN Offload. */
- vlan_pvid_set_t vlan_pvid_set; /**< Set port based TX VLAN insertion. */
-
- eth_queue_start_t rx_queue_start;/**< Start RX for a queue. */
- eth_queue_stop_t rx_queue_stop; /**< Stop RX for a queue. */
- eth_queue_start_t tx_queue_start;/**< Start TX for a queue. */
- eth_queue_stop_t tx_queue_stop; /**< Stop TX for a queue. */
- eth_rx_queue_setup_t rx_queue_setup;/**< Set up device RX queue. */
- eth_queue_release_t rx_queue_release; /**< Release RX queue. */
- eth_rx_queue_count_t rx_queue_count;
- /**< Get the number of used RX descriptors. */
- eth_rx_descriptor_done_t rx_descriptor_done; /**< Check rxd DD bit. */
- eth_rx_descriptor_status_t rx_descriptor_status;
- /**< Check the status of a Rx descriptor. */
- eth_tx_descriptor_status_t tx_descriptor_status;
- /**< Check the status of a Tx descriptor. */
- eth_rx_enable_intr_t rx_queue_intr_enable; /**< Enable Rx queue interrupt. */
- eth_rx_disable_intr_t rx_queue_intr_disable; /**< Disable Rx queue interrupt. */
- eth_tx_queue_setup_t tx_queue_setup;/**< Set up device TX queue. */
- eth_queue_release_t tx_queue_release; /**< Release TX queue. */
- eth_tx_done_cleanup_t tx_done_cleanup;/**< Free tx ring mbufs */
-
- eth_dev_led_on_t dev_led_on; /**< Turn on LED. */
- eth_dev_led_off_t dev_led_off; /**< Turn off LED. */
-
- flow_ctrl_get_t flow_ctrl_get; /**< Get flow control. */
- flow_ctrl_set_t flow_ctrl_set; /**< Setup flow control. */
- priority_flow_ctrl_set_t priority_flow_ctrl_set; /**< Setup priority flow control. */
-
- eth_uc_hash_table_set_t uc_hash_table_set; /**< Set Unicast Table Array. */
- eth_uc_all_hash_table_set_t uc_all_hash_table_set; /**< Set Unicast hash bitmap. */
-
- eth_mirror_rule_set_t mirror_rule_set; /**< Add a traffic mirror rule. */
- eth_mirror_rule_reset_t mirror_rule_reset; /**< reset a traffic mirror rule. */
-
- eth_udp_tunnel_port_add_t udp_tunnel_port_add; /** Add UDP tunnel port. */
- eth_udp_tunnel_port_del_t udp_tunnel_port_del; /** Del UDP tunnel port. */
- eth_l2_tunnel_eth_type_conf_t l2_tunnel_eth_type_conf;
- /** Config ether type of l2 tunnel. */
- eth_l2_tunnel_offload_set_t l2_tunnel_offload_set;
- /** Enable/disable l2 tunnel offload functions. */
-
- eth_set_queue_rate_limit_t set_queue_rate_limit; /**< Set queue rate limit. */
-
- rss_hash_update_t rss_hash_update; /** Configure RSS hash protocols. */
- rss_hash_conf_get_t rss_hash_conf_get; /** Get current RSS hash configuration. */
- reta_update_t reta_update; /** Update redirection table. */
- reta_query_t reta_query; /** Query redirection table. */
-
- eth_get_reg_t get_reg; /**< Get registers. */
- eth_get_eeprom_length_t get_eeprom_length; /**< Get eeprom length. */
- eth_get_eeprom_t get_eeprom; /**< Get eeprom data. */
- eth_set_eeprom_t set_eeprom; /**< Set eeprom. */
-
-
- eth_filter_ctrl_t filter_ctrl; /**< common filter control. */
-
- eth_get_dcb_info get_dcb_info; /** Get DCB information. */
-
- eth_timesync_enable_t timesync_enable;
- /** Turn IEEE1588/802.1AS timestamping on. */
- eth_timesync_disable_t timesync_disable;
- /** Turn IEEE1588/802.1AS timestamping off. */
- eth_timesync_read_rx_timestamp_t timesync_read_rx_timestamp;
- /** Read the IEEE1588/802.1AS RX timestamp. */
- eth_timesync_read_tx_timestamp_t timesync_read_tx_timestamp;
- /** Read the IEEE1588/802.1AS TX timestamp. */
- eth_timesync_adjust_time timesync_adjust_time; /** Adjust the device clock. */
- eth_timesync_read_time timesync_read_time; /** Get the device clock time. */
- eth_timesync_write_time timesync_write_time; /** Set the device clock time. */
-
- eth_xstats_get_by_id_t xstats_get_by_id;
- /**< Get extended device statistic values by ID. */
- eth_xstats_get_names_by_id_t xstats_get_names_by_id;
- /**< Get name of extended device statistics by ID. */
-
- eth_tm_ops_get_t tm_ops_get;
- /**< Get Traffic Management (TM) operations. */
-
- eth_mtr_ops_get_t mtr_ops_get;
- /**< Get Traffic Metering and Policing (MTR) operations. */
-
- eth_pool_ops_supported_t pool_ops_supported;
- /**< Test if a port supports specific mempool ops */
-};
-
/**
* Function type used for RX packet processing packet callbacks.
*
* The callback function is called on RX with a burst of packets that have
* been received on the given port and queue.
*
- * @param port
+ * @param port_id
* The Ethernet port on which RX is being performed.
* @param queue
* The queue on the Ethernet port which is being used to receive the packets.
@@ -1663,7 +1171,7 @@ struct eth_dev_ops {
* @return
* The number of packets returned to the user.
*/
-typedef uint16_t (*rte_rx_callback_fn)(uint16_t port, uint16_t queue,
+typedef uint16_t (*rte_rx_callback_fn)(uint16_t port_id, uint16_t queue,
struct rte_mbuf *pkts[], uint16_t nb_pkts, uint16_t max_pkts,
void *user_param);
@@ -1673,7 +1181,7 @@ typedef uint16_t (*rte_rx_callback_fn)(uint16_t port, uint16_t queue,
* The callback function is called on TX with a burst of packets immediately
* before the packets are put onto the hardware queue for transmission.
*
- * @param port
+ * @param port_id
* The Ethernet port on which TX is being performed.
* @param queue
* The queue on the Ethernet port which is being used to transmit the packets.
@@ -1687,69 +1195,19 @@ typedef uint16_t (*rte_rx_callback_fn)(uint16_t port, uint16_t queue,
* @return
* The number of packets to be written to the NIC.
*/
-typedef uint16_t (*rte_tx_callback_fn)(uint16_t port, uint16_t queue,
+typedef uint16_t (*rte_tx_callback_fn)(uint16_t port_id, uint16_t queue,
struct rte_mbuf *pkts[], uint16_t nb_pkts, void *user_param);
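A minimal sketch of an RX callback matching the prototype above; rte_eth_add_rx_callback() (declared further down in this header) attaches it to a queue. The counter and the queue index 0 are illustrative:

#include <stdint.h>
#include <rte_ethdev.h>
#include <rte_mbuf.h>

/* Count packets delivered to the application on this queue. */
static uint16_t
count_rx_cb(uint16_t port_id, uint16_t queue, struct rte_mbuf *pkts[],
	    uint16_t nb_pkts, uint16_t max_pkts, void *user_param)
{
	uint64_t *counter = user_param;

	(void)port_id;
	(void)queue;
	(void)max_pkts;
	(void)pkts;
	*counter += nb_pkts;
	return nb_pkts;	/* pass every packet through unchanged */
}

static uint64_t rx_count;

static void
attach_counter(uint16_t port_id)
{
	/* Attach to queue 0 of the given port (illustrative). */
	rte_eth_add_rx_callback(port_id, 0, count_rx_cb, &rx_count);
}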
/**
- * @internal
- * Structure used to hold information about the callbacks to be called for a
- * queue on RX and TX.
- */
-struct rte_eth_rxtx_callback {
- struct rte_eth_rxtx_callback *next;
- union{
- rte_rx_callback_fn rx;
- rte_tx_callback_fn tx;
- } fn;
- void *param;
-};
-
-/**
* A set of values to describe the possible states of an eth device.
*/
enum rte_eth_dev_state {
RTE_ETH_DEV_UNUSED = 0,
RTE_ETH_DEV_ATTACHED,
RTE_ETH_DEV_DEFERRED,
+ RTE_ETH_DEV_REMOVED,
};
-/**
- * @internal
- * The generic data structure associated with each ethernet device.
- *
- * Pointers to burst-oriented packet receive and transmit functions are
- * located at the beginning of the structure, along with the pointer to
- * where all the data elements for the particular device are stored in shared
- * memory. This split allows the function pointer and driver data to be per-
- * process, while the actual configuration data for the device is shared.
- */
-struct rte_eth_dev {
- eth_rx_burst_t rx_pkt_burst; /**< Pointer to PMD receive function. */
- eth_tx_burst_t tx_pkt_burst; /**< Pointer to PMD transmit function. */
- eth_tx_prep_t tx_pkt_prepare; /**< Pointer to PMD transmit prepare function. */
- struct rte_eth_dev_data *data; /**< Pointer to device data */
- const struct eth_dev_ops *dev_ops; /**< Functions exported by PMD */
- struct rte_device *device; /**< Backing device */
- struct rte_intr_handle *intr_handle; /**< Device interrupt handle */
- /** User application callbacks for NIC interrupts */
- struct rte_eth_dev_cb_list link_intr_cbs;
- /**
- * User-supplied functions called from rx_burst to post-process
- * received packets before passing them to the user
- */
- struct rte_eth_rxtx_callback *post_rx_burst_cbs[RTE_MAX_QUEUES_PER_PORT];
- /**
- * User-supplied functions called from tx_burst to pre-process
- * received packets before passing them to the driver for transmission.
- */
- struct rte_eth_rxtx_callback *pre_tx_burst_cbs[RTE_MAX_QUEUES_PER_PORT];
- enum rte_eth_dev_state state; /**< Flag indicating the port state */
- void *security_ctx; /**< Context for security ops */
-} __rte_cache_aligned;
-
-void *
-rte_eth_dev_get_sec_ctx(uint8_t port_id);
-
struct rte_eth_dev_sriov {
uint8_t active; /**< SRIOV is active with 16, 32 or 64 pools */
uint8_t nb_q_per_pool; /**< rx queue number per pool */
@@ -1760,56 +1218,13 @@ struct rte_eth_dev_sriov {
#define RTE_ETH_NAME_MAX_LEN RTE_DEV_NAME_MAX_LEN
-/**
- * @internal
- * The data part, with no function pointers, associated with each ethernet device.
- *
- * This structure is safe to place in shared memory to be common among different
- * processes in a multi-process configuration.
- */
-struct rte_eth_dev_data {
- char name[RTE_ETH_NAME_MAX_LEN]; /**< Unique identifier name */
-
- void **rx_queues; /**< Array of pointers to RX queues. */
- void **tx_queues; /**< Array of pointers to TX queues. */
- uint16_t nb_rx_queues; /**< Number of RX queues. */
- uint16_t nb_tx_queues; /**< Number of TX queues. */
-
- struct rte_eth_dev_sriov sriov; /**< SRIOV data */
-
- void *dev_private; /**< PMD-specific private data */
-
- struct rte_eth_link dev_link;
- /**< Link-level information & status */
+#define RTE_ETH_DEV_NO_OWNER 0
- struct rte_eth_conf dev_conf; /**< Configuration applied to device. */
- uint16_t mtu; /**< Maximum Transmission Unit. */
+#define RTE_ETH_MAX_OWNER_NAME_LEN 64
- uint32_t min_rx_buf_size;
- /**< Common rx buffer size handled by all queues */
-
- uint64_t rx_mbuf_alloc_failed; /**< RX ring mbuf allocation failures. */
- struct ether_addr* mac_addrs;/**< Device Ethernet Link address. */
- uint64_t mac_pool_sel[ETH_NUM_RECEIVE_MAC_ADDR];
- /** bitmap array of associating Ethernet MAC addresses to pools */
- struct ether_addr* hash_mac_addrs;
- /** Device Ethernet MAC addresses of hash filtering. */
- uint16_t port_id; /**< Device [external] port identifier. */
- __extension__
- uint8_t promiscuous : 1, /**< RX promiscuous mode ON(1) / OFF(0). */
- scattered_rx : 1, /**< RX of scattered packets is ON(1) / OFF(0) */
- all_multicast : 1, /**< RX all multicast mode ON(1) / OFF(0). */
- dev_started : 1, /**< Device state: STARTED(1) / STOPPED(0). */
- lro : 1; /**< RX LRO is ON(1) / OFF(0) */
- uint8_t rx_queue_state[RTE_MAX_QUEUES_PER_PORT];
- /** Queues state: STARTED(1) / STOPPED(0) */
- uint8_t tx_queue_state[RTE_MAX_QUEUES_PER_PORT];
- /** Queues state: STARTED(1) / STOPPED(0) */
- uint32_t dev_flags; /**< Capabilities */
- enum rte_kernel_driver kdrv; /**< Kernel driver passthrough */
- int numa_node; /**< NUMA node connection */
- struct rte_vlan_filter_conf vlan_filter_conf;
- /**< VLAN filter configuration. */
+struct rte_eth_dev_owner {
+ uint64_t id; /**< The owner unique identifier. */
+ char name[RTE_ETH_MAX_OWNER_NAME_LEN]; /**< The owner name. */
};
/** Device supports link state interrupt */
@@ -1820,11 +1235,29 @@ struct rte_eth_dev_data {
#define RTE_ETH_DEV_INTR_RMV 0x0008
/**
- * @internal
- * The pool of *rte_eth_dev* structures. The size of the pool
- * is configured at compile-time in the <rte_ethdev.c> file.
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Iterates over valid ethdev ports owned by a specific owner.
+ *
+ * @param port_id
+ * The id of the next possible valid owned port.
+ * @param owner_id
+ * The owner identifier.
+ * RTE_ETH_DEV_NO_OWNER means iterate over all valid ownerless ports.
+ * @return
+ * Next valid port id owned by owner_id, RTE_MAX_ETHPORTS if there is none.
+ */
+uint64_t __rte_experimental rte_eth_find_next_owned_by(uint16_t port_id,
+ const uint64_t owner_id);
+
+/**
+ * Macro to iterate over all enabled ethdev ports owned by a specific owner.
*/
-extern struct rte_eth_dev rte_eth_devices[];
+#define RTE_ETH_FOREACH_DEV_OWNED_BY(p, o) \
+ for (p = rte_eth_find_next_owned_by(0, o); \
+ (unsigned int)p < (unsigned int)RTE_MAX_ETHPORTS; \
+ p = rte_eth_find_next_owned_by(p + 1, o))
/**
* Iterates over valid ethdev ports.
@@ -1837,74 +1270,99 @@ extern struct rte_eth_dev rte_eth_devices[];
uint16_t rte_eth_find_next(uint16_t port_id);
/**
- * Macro to iterate over all enabled ethdev ports.
+ * Macro to iterate over all enabled and ownerless ethdev ports.
*/
-#define RTE_ETH_FOREACH_DEV(p) \
- for (p = rte_eth_find_next(0); \
- (unsigned int)p < (unsigned int)RTE_MAX_ETHPORTS; \
- p = rte_eth_find_next(p + 1))
+#define RTE_ETH_FOREACH_DEV(p) \
+ RTE_ETH_FOREACH_DEV_OWNED_BY(p, RTE_ETH_DEV_NO_OWNER)
/**
- * Get the total number of Ethernet devices that have been successfully
- * initialized by the matching Ethernet driver during the PCI probing phase
- * and that are available for applications to use. These devices must be
- * accessed by using the ``RTE_ETH_FOREACH_DEV()`` macro to deal with
- * non-contiguous ranges of devices.
- * These non-contiguous ranges can be created by calls to hotplug functions or
- * by some PMDs.
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Get a new unique owner identifier.
+ * An owner identifier is used so that Ethernet devices are owned by only one
+ * DPDK entity, which avoids having the same device managed by several entities.
*
+ * @param owner_id
+ * Owner identifier pointer.
* @return
- * - The total number of usable Ethernet devices.
+ * Negative errno value on error, 0 on success.
*/
-uint16_t rte_eth_dev_count(void);
+int __rte_experimental rte_eth_dev_owner_new(uint64_t *owner_id);
/**
- * @internal
- * Returns a ethdev slot specified by the unique identifier name.
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Set an Ethernet device owner.
*
- * @param name
- * The pointer to the Unique identifier name for each Ethernet device
+ * @param port_id
+ * The identifier of the port to own.
+ * @param owner
+ * The owner pointer.
* @return
- * - The pointer to the ethdev slot, on success. NULL on error
+ * Negative errno value on error, 0 on success.
*/
-struct rte_eth_dev *rte_eth_dev_allocated(const char *name);
+int __rte_experimental rte_eth_dev_owner_set(const uint16_t port_id,
+ const struct rte_eth_dev_owner *owner);
/**
- * @internal
- * Allocates a new ethdev slot for an ethernet device and returns the pointer
- * to that slot for the driver to use.
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
*
- * @param name Unique identifier name for each Ethernet device
- * @param type Device type of this Ethernet device
+ * Unset Ethernet device owner to make the device ownerless.
+ *
+ * @param port_id
+ *   The identifier of the port to make ownerless.
+ * @param owner_id
+ * The owner identifier.
* @return
- * - Slot in the rte_dev_devices array for a new device;
+ * 0 on success, negative errno value on error.
+ */
+int __rte_experimental rte_eth_dev_owner_unset(const uint16_t port_id,
+ const uint64_t owner_id);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Remove owner from all Ethernet devices owned by a specific owner.
+ *
+ * @param owner_id
+ * The owner identifier.
*/
-struct rte_eth_dev *rte_eth_dev_allocate(const char *name);
+void __rte_experimental rte_eth_dev_owner_delete(const uint64_t owner_id);
/**
- * @internal
- * Attach to the ethdev already initialized by the primary
- * process.
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Get the owner of an Ethernet device.
*
- * @param name Ethernet device's name.
+ * @param port_id
+ * The port identifier.
+ * @param owner
+ * The owner structure pointer to fill.
* @return
- * - Success: Slot in the rte_dev_devices array for attached
- * device.
- * - Error: Null pointer.
+ *  0 on success, negative errno value on error.
*/
-struct rte_eth_dev *rte_eth_dev_attach_secondary(const char *name);
+int __rte_experimental rte_eth_dev_owner_get(const uint16_t port_id,
+ struct rte_eth_dev_owner *owner);
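Putting the experimental ownership API together, a hedged sketch of how a management entity might claim the ownerless ports and later walk only the ones it owns (the owner name is arbitrary; building against these symbols requires ALLOW_EXPERIMENTAL_API):

#include <stdio.h>
#include <string.h>
#include <rte_ethdev.h>

static void
claim_and_walk_ports(void)
{
	struct rte_eth_dev_owner owner;
	uint16_t port_id;

	memset(&owner, 0, sizeof(owner));
	if (rte_eth_dev_owner_new(&owner.id) != 0)
		return;
	snprintf(owner.name, sizeof(owner.name), "example-app");

	/* Claim every currently ownerless port. */
	RTE_ETH_FOREACH_DEV(port_id)
		rte_eth_dev_owner_set(port_id, &owner);

	/* Later, iterate only over ports this entity owns. */
	RTE_ETH_FOREACH_DEV_OWNED_BY(port_id, owner.id)
		printf("port %u is owned by %s\n", port_id, owner.name);

	/* Release everything at shutdown. */
	rte_eth_dev_owner_delete(owner.id);
}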
/**
- * @internal
- * Release the specified ethdev port.
+ * Get the total number of Ethernet devices that have been successfully
+ * initialized by the matching Ethernet driver during the PCI probing phase
+ * and that are available for applications to use. These devices must be
+ * accessed by using the ``RTE_ETH_FOREACH_DEV()`` macro to deal with
+ * non-contiguous ranges of devices.
+ * These non-contiguous ranges can be created by calls to hotplug functions or
+ * by some PMDs.
*
- * @param eth_dev
- * The *eth_dev* pointer is the address of the *rte_eth_dev* structure.
* @return
- * - 0 on success, negative on error
+ * - The total number of usable Ethernet devices.
*/
-int rte_eth_dev_release_port(struct rte_eth_dev *eth_dev);
+uint16_t rte_eth_dev_count(void);
/**
* Attach a new Ethernet device specified by arguments.
@@ -1949,6 +1407,32 @@ int rte_eth_dev_detach(uint16_t port_id, char *devname);
uint32_t rte_eth_speed_bitflag(uint32_t speed, int duplex);
/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice
+ *
+ * Get DEV_RX_OFFLOAD_* flag name.
+ *
+ * @param offload
+ * Offload flag.
+ * @return
+ * Offload name or 'UNKNOWN' if the flag cannot be recognised.
+ */
+const char * __rte_experimental rte_eth_dev_rx_offload_name(uint64_t offload);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice
+ *
+ * Get DEV_TX_OFFLOAD_* flag name.
+ *
+ * @param offload
+ * Offload flag.
+ * @return
+ * Offload name or 'UNKNOWN' if the flag cannot be recognised.
+ */
+const char * __rte_experimental rte_eth_dev_tx_offload_name(uint64_t offload);
+
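These helpers make capability reporting human-readable. A sketch that walks the Rx offload capability mask reported by rte_eth_dev_info_get() and prints each flag's name; the 64-bit local simply covers every possible bit regardless of the field's exact width, and the experimental symbol needs ALLOW_EXPERIMENTAL_API:

#include <stdio.h>
#include <stdint.h>
#include <rte_ethdev.h>

static void
print_rx_offload_capa(uint16_t port_id)
{
	struct rte_eth_dev_info dev_info;
	uint64_t capa;
	unsigned int bit;

	rte_eth_dev_info_get(port_id, &dev_info);
	capa = dev_info.rx_offload_capa;

	for (bit = 0; bit < 64; bit++) {
		uint64_t flag = (uint64_t)1 << bit;

		if (capa & flag)
			printf("  %s\n", rte_eth_dev_rx_offload_name(flag));
	}
}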
+/**
* Configure an Ethernet device.
* This function must be invoked first before any other function in the
* Ethernet API. This function can also be re-invoked when a device is in the
@@ -1984,17 +1468,18 @@ int rte_eth_dev_configure(uint16_t port_id, uint16_t nb_rx_queue,
uint16_t nb_tx_queue, const struct rte_eth_conf *eth_conf);
/**
- * @internal
- * Release device queues and clear its configuration to force the user
- * application to reconfigure it. It is for internal use only.
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
*
- * @param dev
- * Pointer to struct rte_eth_dev.
+ * Check if an Ethernet device was physically removed.
*
+ * @param port_id
+ * The port identifier of the Ethernet device.
* @return
- * void
+ * 1 when the Ethernet device is removed, otherwise 0.
*/
-void _rte_eth_dev_reset(struct rte_eth_dev *dev);
+int __rte_experimental
+rte_eth_dev_is_removed(uint16_t port_id);
/**
* Allocate and set up a receive queue for an Ethernet device.
@@ -2030,6 +1515,7 @@ void _rte_eth_dev_reset(struct rte_eth_dev *dev);
* memory buffers to populate each descriptor of the receive ring.
* @return
* - 0: Success, receive queue correctly set up.
+ * - -EIO: if device is removed.
* - -EINVAL: The size of network buffers which can be allocated from the
* memory pool does not fit the various buffer sizes allowed by the
* device controller.
@@ -2059,7 +1545,7 @@ int rte_eth_rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
* the DMA memory allocated for the transmit descriptors of the ring.
* @param tx_conf
* The pointer to the configuration data to be used for the transmit queue.
- * NULL value is allowed, in which case default RX configuration
+ * NULL value is allowed, in which case default TX configuration
* will be used.
* The *tx_conf* structure contains the following data:
* - The *tx_thresh* structure with the values of the Prefetch, Host, and
@@ -2130,6 +1616,7 @@ int rte_eth_dev_is_valid_port(uint16_t port_id);
* @return
* - 0: Success, the receive queue is started.
* - -EINVAL: The port_id or the queue_id out of range.
+ * - -EIO: if device is removed.
* - -ENOTSUP: The function not supported in PMD driver.
*/
int rte_eth_dev_rx_queue_start(uint16_t port_id, uint16_t rx_queue_id);
@@ -2146,6 +1633,7 @@ int rte_eth_dev_rx_queue_start(uint16_t port_id, uint16_t rx_queue_id);
* @return
* - 0: Success, the receive queue is stopped.
* - -EINVAL: The port_id or the queue_id out of range.
+ * - -EIO: if device is removed.
* - -ENOTSUP: The function not supported in PMD driver.
*/
int rte_eth_dev_rx_queue_stop(uint16_t port_id, uint16_t rx_queue_id);
@@ -2163,6 +1651,7 @@ int rte_eth_dev_rx_queue_stop(uint16_t port_id, uint16_t rx_queue_id);
* @return
* - 0: Success, the transmit queue is started.
* - -EINVAL: The port_id or the queue_id out of range.
+ * - -EIO: if device is removed.
* - -ENOTSUP: The function not supported in PMD driver.
*/
int rte_eth_dev_tx_queue_start(uint16_t port_id, uint16_t tx_queue_id);
@@ -2179,12 +1668,11 @@ int rte_eth_dev_tx_queue_start(uint16_t port_id, uint16_t tx_queue_id);
* @return
* - 0: Success, the transmit queue is stopped.
* - -EINVAL: The port_id or the queue_id out of range.
+ * - -EIO: if device is removed.
* - -ENOTSUP: The function not supported in PMD driver.
*/
int rte_eth_dev_tx_queue_stop(uint16_t port_id, uint16_t tx_queue_id);
-
-
/**
* Start an Ethernet device.
*
@@ -2211,7 +1699,6 @@ int rte_eth_dev_start(uint16_t port_id);
*/
void rte_eth_dev_stop(uint16_t port_id);
-
/**
* Link up an Ethernet device.
*
@@ -2280,7 +1767,7 @@ void rte_eth_dev_close(uint16_t port_id);
* - (-EINVAL) if port identifier is invalid.
* - (-ENOTSUP) if hardware doesn't support this function.
* - (-EPERM) if not ran from the primary process.
- * - (-EIO) if re-initialisation failed.
+ * - (-EIO) if re-initialisation failed or device is removed.
* - (-ENOMEM) if the reset failed due to OOM.
* - (-EAGAIN) if the reset temporarily failed and should be retried later.
*/
@@ -2516,6 +2003,7 @@ int rte_eth_xstats_get_by_id(uint16_t port_id, const uint64_t *ids,
* @return
* 0 on success
* -ENODEV for invalid port_id,
+ * -EIO if device is removed,
* -EINVAL if the xstat_name doesn't exist in port_id
*/
int rte_eth_xstats_get_id_by_name(uint16_t port_id, const char *xstat_name,
@@ -2607,6 +2095,7 @@ void rte_eth_dev_info_get(uint16_t port_id, struct rte_eth_dev_info *dev_info);
* - (0) if successful.
* - (-ENOTSUP) if operation is not supported.
* - (-ENODEV) if *port_id* invalid.
+ * - (-EIO) if device is removed.
* - (>0) if *fw_size* is not enough to store firmware version, return
* the size of the non truncated string.
*/
@@ -2678,6 +2167,7 @@ int rte_eth_dev_get_mtu(uint16_t port_id, uint16_t *mtu);
* - (0) if successful.
* - (-ENOTSUP) if operation is not supported.
* - (-ENODEV) if *port_id* invalid.
+ * - (-EIO) if device is removed.
* - (-EINVAL) if *mtu* invalid.
* - (-EBUSY) if operation is not allowed when the port is running
*/
@@ -2698,6 +2188,7 @@ int rte_eth_dev_set_mtu(uint16_t port_id, uint16_t mtu);
* - (0) if successful.
* - (-ENOSUP) if hardware-assisted VLAN filtering not configured.
* - (-ENODEV) if *port_id* invalid.
+ * - (-EIO) if device is removed.
* - (-ENOSYS) if VLAN filtering on *port_id* disabled.
* - (-EINVAL) if *vlan_id* > 4095.
*/
@@ -2740,6 +2231,7 @@ int rte_eth_dev_set_vlan_strip_on_queue(uint16_t port_id, uint16_t rx_queue_id,
* - (0) if successful.
* - (-ENOSUP) if hardware-assisted VLAN TPID setup is not supported.
* - (-ENODEV) if *port_id* invalid.
+ * - (-EIO) if device is removed.
*/
int rte_eth_dev_set_vlan_ether_type(uint16_t port_id,
enum rte_vlan_type vlan_type,
@@ -2764,6 +2256,7 @@ int rte_eth_dev_set_vlan_ether_type(uint16_t port_id,
* - (0) if successful.
* - (-ENOSUP) if hardware-assisted VLAN filtering not configured.
* - (-ENODEV) if *port_id* invalid.
+ * - (-EIO) if device is removed.
*/
int rte_eth_dev_set_vlan_offload(uint16_t port_id, int offload_mask);
@@ -2797,491 +2290,6 @@ int rte_eth_dev_get_vlan_offload(uint16_t port_id);
*/
int rte_eth_dev_set_vlan_pvid(uint16_t port_id, uint16_t pvid, int on);
-/**
- *
- * Retrieve a burst of input packets from a receive queue of an Ethernet
- * device. The retrieved packets are stored in *rte_mbuf* structures whose
- * pointers are supplied in the *rx_pkts* array.
- *
- * The rte_eth_rx_burst() function loops, parsing the RX ring of the
- * receive queue, up to *nb_pkts* packets, and for each completed RX
- * descriptor in the ring, it performs the following operations:
- *
- * - Initialize the *rte_mbuf* data structure associated with the
- * RX descriptor according to the information provided by the NIC into
- * that RX descriptor.
- *
- * - Store the *rte_mbuf* data structure into the next entry of the
- * *rx_pkts* array.
- *
- * - Replenish the RX descriptor with a new *rte_mbuf* buffer
- * allocated from the memory pool associated with the receive queue at
- * initialization time.
- *
- * When retrieving an input packet that was scattered by the controller
- * into multiple receive descriptors, the rte_eth_rx_burst() function
- * appends the associated *rte_mbuf* buffers to the first buffer of the
- * packet.
- *
- * The rte_eth_rx_burst() function returns the number of packets
- * actually retrieved, which is the number of *rte_mbuf* data structures
- * effectively supplied into the *rx_pkts* array.
- * A return value equal to *nb_pkts* indicates that the RX queue contained
- * at least *rx_pkts* packets, and this is likely to signify that other
- * received packets remain in the input queue. Applications implementing
- * a "retrieve as much received packets as possible" policy can check this
- * specific case and keep invoking the rte_eth_rx_burst() function until
- * a value less than *nb_pkts* is returned.
- *
- * This receive method has the following advantages:
- *
- * - It allows a run-to-completion network stack engine to retrieve and
- * to immediately process received packets in a fast burst-oriented
- * approach, avoiding the overhead of unnecessary intermediate packet
- * queue/dequeue operations.
- *
- * - Conversely, it also allows an asynchronous-oriented processing
- * method to retrieve bursts of received packets and to immediately
- * queue them for further parallel processing by another logical core,
- * for instance. However, instead of having received packets being
- * individually queued by the driver, this approach allows the caller
- * of the rte_eth_rx_burst() function to queue a burst of retrieved
- * packets at a time and therefore dramatically reduce the cost of
- * enqueue/dequeue operations per packet.
- *
- * - It allows the rte_eth_rx_burst() function of the driver to take
- * advantage of burst-oriented hardware features (CPU cache,
- * prefetch instructions, and so on) to minimize the number of CPU
- * cycles per packet.
- *
- * To summarize, the proposed receive API enables many
- * burst-oriented optimizations in both synchronous and asynchronous
- * packet processing environments with no overhead in both cases.
- *
- * The rte_eth_rx_burst() function does not provide any error
- * notification to avoid the corresponding overhead. As a hint, the
- * upper-level application might check the status of the device link once
- * being systematically returned a 0 value for a given number of tries.
- *
- * @param port_id
- * The port identifier of the Ethernet device.
- * @param queue_id
- * The index of the receive queue from which to retrieve input packets.
- * The value must be in the range [0, nb_rx_queue - 1] previously supplied
- * to rte_eth_dev_configure().
- * @param rx_pkts
- * The address of an array of pointers to *rte_mbuf* structures that
- * must be large enough to store *nb_pkts* pointers in it.
- * @param nb_pkts
- * The maximum number of packets to retrieve.
- * @return
- * The number of packets actually retrieved, which is the number
- * of pointers to *rte_mbuf* structures effectively supplied to the
- * *rx_pkts* array.
- */
-static inline uint16_t
-rte_eth_rx_burst(uint16_t port_id, uint16_t queue_id,
- struct rte_mbuf **rx_pkts, const uint16_t nb_pkts)
-{
- struct rte_eth_dev *dev = &rte_eth_devices[port_id];
-
-#ifdef RTE_LIBRTE_ETHDEV_DEBUG
- RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, 0);
- RTE_FUNC_PTR_OR_ERR_RET(*dev->rx_pkt_burst, 0);
-
- if (queue_id >= dev->data->nb_rx_queues) {
- RTE_PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", queue_id);
- return 0;
- }
-#endif
- int16_t nb_rx = (*dev->rx_pkt_burst)(dev->data->rx_queues[queue_id],
- rx_pkts, nb_pkts);
-
-#ifdef RTE_ETHDEV_RXTX_CALLBACKS
- struct rte_eth_rxtx_callback *cb = dev->post_rx_burst_cbs[queue_id];
-
- if (unlikely(cb != NULL)) {
- do {
- nb_rx = cb->fn.rx(port_id, queue_id, rx_pkts, nb_rx,
- nb_pkts, cb->param);
- cb = cb->next;
- } while (cb != NULL);
- }
-#endif
-
- return nb_rx;
-}
-
-/**
- * Get the number of used descriptors of a rx queue
- *
- * @param port_id
- * The port identifier of the Ethernet device.
- * @param queue_id
- * The queue id on the specific port.
- * @return
- * The number of used descriptors in the specific queue, or:
- * (-EINVAL) if *port_id* or *queue_id* is invalid
- * (-ENOTSUP) if the device does not support this function
- */
-static inline int
-rte_eth_rx_queue_count(uint16_t port_id, uint16_t queue_id)
-{
- struct rte_eth_dev *dev;
-
- RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
- dev = &rte_eth_devices[port_id];
- RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_count, -ENOTSUP);
- if (queue_id >= dev->data->nb_rx_queues)
- return -EINVAL;
-
- return (*dev->dev_ops->rx_queue_count)(dev, queue_id);
-}
-
-/**
- * Check if the DD bit of the specific RX descriptor in the queue has been set
- *
- * @param port_id
- * The port identifier of the Ethernet device.
- * @param queue_id
- * The queue id on the specific port.
- * @param offset
- * The offset of the descriptor ID from tail.
- * @return
- * - (1) if the specific DD bit is set.
- * - (0) if the specific DD bit is not set.
- * - (-ENODEV) if *port_id* invalid.
- * - (-ENOTSUP) if the device does not support this function
- */
-static inline int
-rte_eth_rx_descriptor_done(uint16_t port_id, uint16_t queue_id, uint16_t offset)
-{
- struct rte_eth_dev *dev = &rte_eth_devices[port_id];
- RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
- RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_descriptor_done, -ENOTSUP);
- return (*dev->dev_ops->rx_descriptor_done)( \
- dev->data->rx_queues[queue_id], offset);
-}
-
-#define RTE_ETH_RX_DESC_AVAIL 0 /**< Desc available for hw. */
-#define RTE_ETH_RX_DESC_DONE 1 /**< Desc done, filled by hw. */
-#define RTE_ETH_RX_DESC_UNAVAIL 2 /**< Desc used by driver or hw. */
-
-/**
- * Check the status of a Rx descriptor in the queue
- *
- * It should be called in a similar context than the Rx function:
- * - on a dataplane core
- * - not concurrently on the same queue
- *
- * Since it's a dataplane function, no check is performed on port_id and
- * queue_id. The caller must therefore ensure that the port is enabled
- * and the queue is configured and running.
- *
- * Note: accessing to a random descriptor in the ring may trigger cache
- * misses and have a performance impact.
- *
- * @param port_id
- * A valid port identifier of the Ethernet device which.
- * @param queue_id
- * A valid Rx queue identifier on this port.
- * @param offset
- * The offset of the descriptor starting from tail (0 is the next
- * packet to be received by the driver).
- *
- * @return
- * - (RTE_ETH_RX_DESC_AVAIL): Descriptor is available for the hardware to
- * receive a packet.
- * - (RTE_ETH_RX_DESC_DONE): Descriptor is done, it is filled by hw, but
- * not yet processed by the driver (i.e. in the receive queue).
- * - (RTE_ETH_RX_DESC_UNAVAIL): Descriptor is unavailable, either hold by
- * the driver and not yet returned to hw, or reserved by the hw.
- * - (-EINVAL) bad descriptor offset.
- * - (-ENOTSUP) if the device does not support this function.
- * - (-ENODEV) bad port or queue (only if compiled with debug).
- */
-static inline int
-rte_eth_rx_descriptor_status(uint16_t port_id, uint16_t queue_id,
- uint16_t offset)
-{
- struct rte_eth_dev *dev;
- void *rxq;
-
-#ifdef RTE_LIBRTE_ETHDEV_DEBUG
- RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
-#endif
- dev = &rte_eth_devices[port_id];
-#ifdef RTE_LIBRTE_ETHDEV_DEBUG
- if (queue_id >= dev->data->nb_rx_queues)
- return -ENODEV;
-#endif
- RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_descriptor_status, -ENOTSUP);
- rxq = dev->data->rx_queues[queue_id];
-
- return (*dev->dev_ops->rx_descriptor_status)(rxq, offset);
-}
-
-#define RTE_ETH_TX_DESC_FULL 0 /**< Desc filled for hw, waiting xmit. */
-#define RTE_ETH_TX_DESC_DONE 1 /**< Desc done, packet is transmitted. */
-#define RTE_ETH_TX_DESC_UNAVAIL 2 /**< Desc used by driver or hw. */
-
-/**
- * Check the status of a Tx descriptor in the queue.
- *
- * It should be called in a similar context than the Tx function:
- * - on a dataplane core
- * - not concurrently on the same queue
- *
- * Since it's a dataplane function, no check is performed on port_id and
- * queue_id. The caller must therefore ensure that the port is enabled
- * and the queue is configured and running.
- *
- * Note: accessing to a random descriptor in the ring may trigger cache
- * misses and have a performance impact.
- *
- * @param port_id
- * A valid port identifier of the Ethernet device which.
- * @param queue_id
- * A valid Tx queue identifier on this port.
- * @param offset
- * The offset of the descriptor starting from tail (0 is the place where
- * the next packet will be send).
- *
- * @return
- * - (RTE_ETH_TX_DESC_FULL) Descriptor is being processed by the hw, i.e.
- * in the transmit queue.
- * - (RTE_ETH_TX_DESC_DONE) Hardware is done with this descriptor, it can
- * be reused by the driver.
- * - (RTE_ETH_TX_DESC_UNAVAIL): Descriptor is unavailable, reserved by the
- * driver or the hardware.
- * - (-EINVAL) bad descriptor offset.
- * - (-ENOTSUP) if the device does not support this function.
- * - (-ENODEV) bad port or queue (only if compiled with debug).
- */
-static inline int rte_eth_tx_descriptor_status(uint16_t port_id,
- uint16_t queue_id, uint16_t offset)
-{
- struct rte_eth_dev *dev;
- void *txq;
-
-#ifdef RTE_LIBRTE_ETHDEV_DEBUG
- RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
-#endif
- dev = &rte_eth_devices[port_id];
-#ifdef RTE_LIBRTE_ETHDEV_DEBUG
- if (queue_id >= dev->data->nb_tx_queues)
- return -ENODEV;
-#endif
- RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_descriptor_status, -ENOTSUP);
- txq = dev->data->tx_queues[queue_id];
-
- return (*dev->dev_ops->tx_descriptor_status)(txq, offset);
-}
-
-/**
- * Send a burst of output packets on a transmit queue of an Ethernet device.
- *
- * The rte_eth_tx_burst() function is invoked to transmit output packets
- * on the output queue *queue_id* of the Ethernet device designated by its
- * *port_id*.
- * The *nb_pkts* parameter is the number of packets to send which are
- * supplied in the *tx_pkts* array of *rte_mbuf* structures, each of them
- * allocated from a pool created with rte_pktmbuf_pool_create().
- * The rte_eth_tx_burst() function loops, sending *nb_pkts* packets,
- * up to the number of transmit descriptors available in the TX ring of the
- * transmit queue.
- * For each packet to send, the rte_eth_tx_burst() function performs
- * the following operations:
- *
- * - Pick up the next available descriptor in the transmit ring.
- *
- * - Free the network buffer previously sent with that descriptor, if any.
- *
- * - Initialize the transmit descriptor with the information provided
- * in the *rte_mbuf data structure.
- *
- * In the case of a segmented packet composed of a list of *rte_mbuf* buffers,
- * the rte_eth_tx_burst() function uses several transmit descriptors
- * of the ring.
- *
- * The rte_eth_tx_burst() function returns the number of packets it
- * actually sent. A return value equal to *nb_pkts* means that all packets
- * have been sent, and this is likely to signify that other output packets
- * could be immediately transmitted again. Applications that implement a
- * "send as many packets to transmit as possible" policy can check this
- * specific case and keep invoking the rte_eth_tx_burst() function until
- * a value less than *nb_pkts* is returned.
- *
- * It is the responsibility of the rte_eth_tx_burst() function to
- * transparently free the memory buffers of packets previously sent.
- * This feature is driven by the *tx_free_thresh* value supplied to the
- * rte_eth_dev_configure() function at device configuration time.
- * When the number of free TX descriptors drops below this threshold, the
- * rte_eth_tx_burst() function must [attempt to] free the *rte_mbuf* buffers
- * of those packets whose transmission was effectively completed.
- *
- * If the PMD is DEV_TX_OFFLOAD_MT_LOCKFREE capable, multiple threads can
- * invoke this function concurrently on the same tx queue without SW lock.
- * @see rte_eth_dev_info_get, struct rte_eth_txconf::txq_flags
- *
- * @param port_id
- * The port identifier of the Ethernet device.
- * @param queue_id
- * The index of the transmit queue through which output packets must be
- * sent.
- * The value must be in the range [0, nb_tx_queue - 1] previously supplied
- * to rte_eth_dev_configure().
- * @param tx_pkts
- * The address of an array of *nb_pkts* pointers to *rte_mbuf* structures
- * which contain the output packets.
- * @param nb_pkts
- * The maximum number of packets to transmit.
- * @return
- * The number of output packets actually stored in transmit descriptors of
- * the transmit ring. The return value can be less than the value of the
- * *tx_pkts* parameter when the transmit ring is full or has been filled up.
- */
-static inline uint16_t
-rte_eth_tx_burst(uint16_t port_id, uint16_t queue_id,
- struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
-{
- struct rte_eth_dev *dev = &rte_eth_devices[port_id];
-
-#ifdef RTE_LIBRTE_ETHDEV_DEBUG
- RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, 0);
- RTE_FUNC_PTR_OR_ERR_RET(*dev->tx_pkt_burst, 0);
-
- if (queue_id >= dev->data->nb_tx_queues) {
- RTE_PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", queue_id);
- return 0;
- }
-#endif
-
-#ifdef RTE_ETHDEV_RXTX_CALLBACKS
- struct rte_eth_rxtx_callback *cb = dev->pre_tx_burst_cbs[queue_id];
-
- if (unlikely(cb != NULL)) {
- do {
- nb_pkts = cb->fn.tx(port_id, queue_id, tx_pkts, nb_pkts,
- cb->param);
- cb = cb->next;
- } while (cb != NULL);
- }
-#endif
-
- return (*dev->tx_pkt_burst)(dev->data->tx_queues[queue_id], tx_pkts, nb_pkts);
-}
-
-/**
- * @warning
- * @b EXPERIMENTAL: this API may change without prior notice
- *
- * Process a burst of output packets on a transmit queue of an Ethernet device.
- *
- * The rte_eth_tx_prepare() function is invoked to prepare output packets to be
- * transmitted on the output queue *queue_id* of the Ethernet device designated
- * by its *port_id*.
- * The *nb_pkts* parameter is the number of packets to be prepared which are
- * supplied in the *tx_pkts* array of *rte_mbuf* structures, each of them
- * allocated from a pool created with rte_pktmbuf_pool_create().
- * For each packet to send, the rte_eth_tx_prepare() function performs
- * the following operations:
- *
- * - Check if packet meets devices requirements for tx offloads.
- *
- * - Check limitations about number of segments.
- *
- * - Check additional requirements when debug is enabled.
- *
- * - Update and/or reset required checksums when tx offload is set for packet.
- *
- * Since this function can modify packet data, provided mbufs must be safely
- * writable (e.g. modified data cannot be in shared segment).
- *
- * The rte_eth_tx_prepare() function returns the number of packets ready to be
- * sent. A return value equal to *nb_pkts* means that all packets are valid and
- * ready to be sent, otherwise stops processing on the first invalid packet and
- * leaves the rest packets untouched.
- *
- * When this functionality is not implemented in the driver, all packets are
- * are returned untouched.
- *
- * @param port_id
- * The port identifier of the Ethernet device.
- * The value must be a valid port id.
- * @param queue_id
- * The index of the transmit queue through which output packets must be
- * sent.
- * The value must be in the range [0, nb_tx_queue - 1] previously supplied
- * to rte_eth_dev_configure().
- * @param tx_pkts
- * The address of an array of *nb_pkts* pointers to *rte_mbuf* structures
- * which contain the output packets.
- * @param nb_pkts
- * The maximum number of packets to process.
- * @return
- * The number of packets correct and ready to be sent. The return value can be
- * less than the value of the *tx_pkts* parameter when some packet doesn't
- * meet devices requirements with rte_errno set appropriately:
- * - -EINVAL: offload flags are not correctly set
- * - -ENOTSUP: the offload feature is not supported by the hardware
- *
- */
-
-#ifndef RTE_ETHDEV_TX_PREPARE_NOOP
-
-static inline uint16_t
-rte_eth_tx_prepare(uint16_t port_id, uint16_t queue_id,
- struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
-{
- struct rte_eth_dev *dev;
-
-#ifdef RTE_LIBRTE_ETHDEV_DEBUG
- if (!rte_eth_dev_is_valid_port(port_id)) {
- RTE_PMD_DEBUG_TRACE("Invalid TX port_id=%d\n", port_id);
- rte_errno = -EINVAL;
- return 0;
- }
-#endif
-
- dev = &rte_eth_devices[port_id];
-
-#ifdef RTE_LIBRTE_ETHDEV_DEBUG
- if (queue_id >= dev->data->nb_tx_queues) {
- RTE_PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", queue_id);
- rte_errno = -EINVAL;
- return 0;
- }
-#endif
-
- if (!dev->tx_pkt_prepare)
- return nb_pkts;
-
- return (*dev->tx_pkt_prepare)(dev->data->tx_queues[queue_id],
- tx_pkts, nb_pkts);
-}
-
-#else
-
-/*
- * Native NOOP operation for compilation targets which doesn't require any
- * preparations steps, and functional NOOP may introduce unnecessary performance
- * drop.
- *
- * Generally this is not a good idea to turn it on globally and didn't should
- * be used if behavior of tx_preparation can change.
- */
-
-static inline uint16_t
-rte_eth_tx_prepare(__rte_unused uint16_t port_id,
- __rte_unused uint16_t queue_id,
- __rte_unused struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
-{
- return nb_pkts;
-}
-
-#endif
-
typedef void (*buffer_tx_error_fn)(struct rte_mbuf **unsent, uint16_t count,
void *userdata);
@@ -3321,91 +2329,6 @@ int
rte_eth_tx_buffer_init(struct rte_eth_dev_tx_buffer *buffer, uint16_t size);
/**
- * Send any packets queued up for transmission on a port and HW queue
- *
- * This causes an explicit flush of packets previously buffered via the
- * rte_eth_tx_buffer() function. It returns the number of packets successfully
- * sent to the NIC, and calls the error callback for any unsent packets. Unless
- * explicitly set up otherwise, the default callback simply frees the unsent
- * packets back to the owning mempool.
- *
- * @param port_id
- * The port identifier of the Ethernet device.
- * @param queue_id
- * The index of the transmit queue through which output packets must be
- * sent.
- * The value must be in the range [0, nb_tx_queue - 1] previously supplied
- * to rte_eth_dev_configure().
- * @param buffer
- * Buffer of packets to be transmit.
- * @return
- * The number of packets successfully sent to the Ethernet device. The error
- * callback is called for any packets which could not be sent.
- */
-static inline uint16_t
-rte_eth_tx_buffer_flush(uint16_t port_id, uint16_t queue_id,
- struct rte_eth_dev_tx_buffer *buffer)
-{
- uint16_t sent;
- uint16_t to_send = buffer->length;
-
- if (to_send == 0)
- return 0;
-
- sent = rte_eth_tx_burst(port_id, queue_id, buffer->pkts, to_send);
-
- buffer->length = 0;
-
- /* All packets sent, or to be dealt with by callback below */
- if (unlikely(sent != to_send))
- buffer->error_callback(&buffer->pkts[sent], to_send - sent,
- buffer->error_userdata);
-
- return sent;
-}
-
-/**
- * Buffer a single packet for future transmission on a port and queue
- *
- * This function takes a single mbuf/packet and buffers it for later
- * transmission on the particular port and queue specified. Once the buffer is
- * full of packets, an attempt will be made to transmit all the buffered
- * packets. In case of error, where not all packets can be transmitted, a
- * callback is called with the unsent packets as a parameter. If no callback
- * is explicitly set up, the unsent packets are just freed back to the owning
- * mempool. The function returns the number of packets actually sent i.e.
- * 0 if no buffer flush occurred, otherwise the number of packets successfully
- * flushed
- *
- * @param port_id
- * The port identifier of the Ethernet device.
- * @param queue_id
- * The index of the transmit queue through which output packets must be
- * sent.
- * The value must be in the range [0, nb_tx_queue - 1] previously supplied
- * to rte_eth_dev_configure().
- * @param buffer
- * Buffer used to collect packets to be sent.
- * @param tx_pkt
- * Pointer to the packet mbuf to be sent.
- * @return
- * 0 = packet has been buffered for later transmission
- * N > 0 = packet has been buffered, and the buffer was subsequently flushed,
- * causing N packets to be sent, and the error callback to be called for
- * the rest.
- */
-static __rte_always_inline uint16_t
-rte_eth_tx_buffer(uint16_t port_id, uint16_t queue_id,
- struct rte_eth_dev_tx_buffer *buffer, struct rte_mbuf *tx_pkt)
-{
- buffer->pkts[buffer->length++] = tx_pkt;
- if (buffer->length < buffer->size)
- return 0;
-
- return rte_eth_tx_buffer_flush(port_id, queue_id, buffer);
-}
-
-/**
* Configure a callback for buffered packets which cannot be sent
*
* Register a specific callback to be called when an attempt is made to send
@@ -3505,6 +2428,7 @@ rte_eth_tx_buffer_count_callback(struct rte_mbuf **pkts, uint16_t unsent,
* @return
* Failure: < 0
* -ENODEV: Invalid interface
+ * -EIO: device is removed
* -ENOTSUP: Driver does not support function
* Success: >= 0
* 0-n: Number of packets freed. More packets may still remain in ring that
@@ -3526,6 +2450,8 @@ enum rte_eth_event_type {
RTE_ETH_EVENT_VF_MBOX, /**< message from the VF received by PF */
RTE_ETH_EVENT_MACSEC, /**< MACsec offload related event */
RTE_ETH_EVENT_INTR_RMV, /**< device removal event */
+ RTE_ETH_EVENT_NEW, /**< port is probed */
+ RTE_ETH_EVENT_DESTROY, /**< port is released */
RTE_ETH_EVENT_MAX /**< max value of this enum */
};
@@ -3533,13 +2459,12 @@ typedef int (*rte_eth_dev_cb_fn)(uint16_t port_id,
enum rte_eth_event_type event, void *cb_arg, void *ret_param);
/**< user application callback to be registered for interrupts */
-
-
/**
- * Register a callback function for specific port id.
+ * Register a callback function for a port event.
*
* @param port_id
* Port id.
+ * RTE_ETH_ALL means register the event for all port ids.
* @param event
* Event interested.
* @param cb_fn
@@ -3556,10 +2481,11 @@ int rte_eth_dev_callback_register(uint16_t port_id,
rte_eth_dev_cb_fn cb_fn, void *cb_arg);
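A minimal sketch of registering an event callback for every port, using the RTE_ETH_ALL behaviour documented above; the function names removal_cb and watch_removals are assumptions, not part of this patch:

#include <stdio.h>
#include <rte_ethdev.h>

/* Sketch: log hot-unplug (RMV) interrupts for every port. */
static int
removal_cb(uint16_t port_id, enum rte_eth_event_type event,
	   void *cb_arg, void *ret_param)
{
	(void)cb_arg;
	(void)ret_param;
	if (event == RTE_ETH_EVENT_INTR_RMV)
		printf("port %u was removed\n", (unsigned int)port_id);
	return 0;
}

static void
watch_removals(void)
{
	/* RTE_ETH_ALL: register the callback for all port ids. */
	rte_eth_dev_callback_register(RTE_ETH_ALL, RTE_ETH_EVENT_INTR_RMV,
				      removal_cb, NULL);
}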
/**
- * Unregister a callback function for specific port id.
+ * Unregister a callback function for a port event.
*
* @param port_id
* Port id.
+ * RTE_ETH_ALL means unregister the event for all port ids.
* @param event
* Event interested.
* @param cb_fn
@@ -3577,28 +2503,6 @@ int rte_eth_dev_callback_unregister(uint16_t port_id,
rte_eth_dev_cb_fn cb_fn, void *cb_arg);
/**
- * @internal Executes all the user application registered callbacks for
- * the specific device. It is for DPDK internal user only. User
- * application should not call it directly.
- *
- * @param dev
- * Pointer to struct rte_eth_dev.
- * @param event
- * Eth device interrupt event type.
- * @param cb_arg
- * callback parameter.
- * @param ret_param
- * To pass data back to user application.
- * This allows the user application to decide if a particular function
- * is permitted or not.
- *
- * @return
- * int
- */
-int _rte_eth_dev_callback_process(struct rte_eth_dev *dev,
- enum rte_eth_event_type event, void *cb_arg, void *ret_param);
-
-/**
* When there is no rx packet coming in Rx Queue for a long time, we can
* sleep lcore related to RX Queue for power saving, and enable rx interrupt
* to be triggered when Rx packet arrives.
@@ -3617,6 +2521,7 @@ int _rte_eth_dev_callback_process(struct rte_eth_dev *dev,
* - (-ENOTSUP) if underlying hardware OR driver doesn't support
* that operation.
* - (-ENODEV) if *port_id* invalid.
+ * - (-EIO) if device is removed.
*/
int rte_eth_dev_rx_intr_enable(uint16_t port_id, uint16_t queue_id);
@@ -3638,6 +2543,7 @@ int rte_eth_dev_rx_intr_enable(uint16_t port_id, uint16_t queue_id);
* - (-ENOTSUP) if underlying hardware OR driver doesn't support
* that operation.
* - (-ENODEV) if *port_id* invalid.
+ * - (-EIO) if device is removed.
*/
int rte_eth_dev_rx_intr_disable(uint16_t port_id, uint16_t queue_id);
@@ -3695,6 +2601,7 @@ int rte_eth_dev_rx_intr_ctl_q(uint16_t port_id, uint16_t queue_id,
* - (-ENOTSUP) if underlying hardware OR driver doesn't support
* that operation.
* - (-ENODEV) if *port_id* invalid.
+ * - (-EIO) if device is removed.
*/
int rte_eth_led_on(uint16_t port_id);
@@ -3709,6 +2616,7 @@ int rte_eth_led_on(uint16_t port_id);
* - (-ENOTSUP) if underlying hardware OR driver doesn't support
* that operation.
* - (-ENODEV) if *port_id* invalid.
+ * - (-EIO) if device is removed.
*/
int rte_eth_led_off(uint16_t port_id);
@@ -3723,6 +2631,7 @@ int rte_eth_led_off(uint16_t port_id);
* - (0) if successful.
* - (-ENOTSUP) if hardware doesn't support flow control.
* - (-ENODEV) if *port_id* invalid.
+ * - (-EIO) if device is removed.
*/
int rte_eth_dev_flow_ctrl_get(uint16_t port_id,
struct rte_eth_fc_conf *fc_conf);
@@ -3739,7 +2648,7 @@ int rte_eth_dev_flow_ctrl_get(uint16_t port_id,
* - (-ENOTSUP) if hardware doesn't support flow control mode.
* - (-ENODEV) if *port_id* invalid.
* - (-EINVAL) if bad parameter
- * - (-EIO) if flow control setup failure
+ * - (-EIO) if flow control setup failure or device is removed.
*/
int rte_eth_dev_flow_ctrl_set(uint16_t port_id,
struct rte_eth_fc_conf *fc_conf);
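A minimal sketch of the get/modify/set flow-control pattern; the helper name enable_full_flow_ctrl is an assumption, not part of this patch:

#include <string.h>
#include <rte_ethdev.h>

/* Sketch: switch a port to full (Rx + Tx) link flow control. */
static int
enable_full_flow_ctrl(uint16_t port_id)
{
	struct rte_eth_fc_conf fc_conf;
	int ret;

	memset(&fc_conf, 0, sizeof(fc_conf));
	ret = rte_eth_dev_flow_ctrl_get(port_id, &fc_conf);
	if (ret != 0)
		return ret;	/* -ENOTSUP, -ENODEV or -EIO */

	fc_conf.mode = RTE_FC_FULL;
	return rte_eth_dev_flow_ctrl_set(port_id, &fc_conf);
}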
@@ -3757,7 +2666,7 @@ int rte_eth_dev_flow_ctrl_set(uint16_t port_id,
* - (-ENOTSUP) if hardware doesn't support priority flow control mode.
* - (-ENODEV) if *port_id* invalid.
* - (-EINVAL) if bad parameter
- * - (-EIO) if flow control setup failure
+ * - (-EIO) if flow control setup failure or device is removed.
*/
int rte_eth_dev_priority_flow_ctrl_set(uint16_t port_id,
struct rte_eth_pfc_conf *pfc_conf);
@@ -3766,7 +2675,7 @@ int rte_eth_dev_priority_flow_ctrl_set(uint16_t port_id,
* Add a MAC address to an internal array of addresses used to enable whitelist
* filtering to accept packets only if the destination MAC address matches.
*
- * @param port
+ * @param port_id
* The port identifier of the Ethernet device.
* @param mac_addr
* The MAC address to add.
@@ -3774,19 +2683,20 @@ int rte_eth_dev_priority_flow_ctrl_set(uint16_t port_id,
* VMDq pool index to associate address with (if VMDq is enabled). If VMDq is
* not enabled, this should be set to 0.
* @return
- * - (0) if successfully added or *mac_addr" was already added.
+ * - (0) if successfully added or *mac_addr* was already added.
* - (-ENOTSUP) if hardware doesn't support this feature.
* - (-ENODEV) if *port* is invalid.
+ * - (-EIO) if device is removed.
* - (-ENOSPC) if no more MAC addresses can be added.
* - (-EINVAL) if MAC address is invalid.
*/
-int rte_eth_dev_mac_addr_add(uint16_t port, struct ether_addr *mac_addr,
+int rte_eth_dev_mac_addr_add(uint16_t port_id, struct ether_addr *mac_addr,
uint32_t pool);
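A minimal sketch of adding a secondary unicast address with this API; the locally administered address and the helper name add_extra_mac are assumptions, not part of this patch:

#include <rte_ether.h>
#include <rte_ethdev.h>

/* Sketch: whitelist an extra unicast MAC address on pool 0 (no VMDq). */
static int
add_extra_mac(uint16_t port_id)
{
	struct ether_addr addr = {
		.addr_bytes = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 },
	};

	return rte_eth_dev_mac_addr_add(port_id, &addr, 0);
}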
/**
* Remove a MAC address from the internal array of addresses.
*
- * @param port
+ * @param port_id
* The port identifier of the Ethernet device.
* @param mac_addr
* MAC address to remove.
@@ -3796,12 +2706,12 @@ int rte_eth_dev_mac_addr_add(uint16_t port, struct ether_addr *mac_addr,
* - (-ENODEV) if *port* invalid.
* - (-EADDRINUSE) if attempting to remove the default MAC address
*/
-int rte_eth_dev_mac_addr_remove(uint16_t port, struct ether_addr *mac_addr);
+int rte_eth_dev_mac_addr_remove(uint16_t port_id, struct ether_addr *mac_addr);
/**
* Set the default MAC address.
*
- * @param port
+ * @param port_id
* The port identifier of the Ethernet device.
* @param mac_addr
* New default MAC address.
@@ -3811,13 +2721,13 @@ int rte_eth_dev_mac_addr_remove(uint16_t port, struct ether_addr *mac_addr);
* - (-ENODEV) if *port* invalid.
* - (-EINVAL) if MAC address is invalid.
*/
-int rte_eth_dev_default_mac_addr_set(uint16_t port,
+int rte_eth_dev_default_mac_addr_set(uint16_t port_id,
struct ether_addr *mac_addr);
/**
* Update Redirection Table(RETA) of Receive Side Scaling of Ethernet device.
*
- * @param port
+ * @param port_id
* The port identifier of the Ethernet device.
* @param reta_conf
* RETA to update.
@@ -3828,15 +2738,16 @@ int rte_eth_dev_default_mac_addr_set(uint16_t port,
* - (0) if successful.
* - (-ENOTSUP) if hardware doesn't support.
* - (-EINVAL) if bad parameter.
+ * - (-EIO) if device is removed.
*/
-int rte_eth_dev_rss_reta_update(uint16_t port,
+int rte_eth_dev_rss_reta_update(uint16_t port_id,
struct rte_eth_rss_reta_entry64 *reta_conf,
uint16_t reta_size);
/**
* Query Redirection Table(RETA) of Receive Side Scaling of Ethernet device.
*
- * @param port
+ * @param port_id
* The port identifier of the Ethernet device.
* @param reta_conf
* RETA to query.
@@ -3847,8 +2758,9 @@ int rte_eth_dev_rss_reta_update(uint16_t port,
* - (0) if successful.
* - (-ENOTSUP) if hardware doesn't support.
* - (-EINVAL) if bad parameter.
+ * - (-EIO) if device is removed.
*/
-int rte_eth_dev_rss_reta_query(uint16_t port,
+int rte_eth_dev_rss_reta_query(uint16_t port_id,
struct rte_eth_rss_reta_entry64 *reta_conf,
uint16_t reta_size);
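A minimal sketch of populating the RETA with a round-robin spread over the configured Rx queues; the 8-group bound and the helper name spread_reta are assumptions, not part of this patch:

#include <errno.h>
#include <stdint.h>
#include <string.h>
#include <rte_ethdev.h>

/* Sketch: spread all RETA entries round-robin over nb_rx_queues. */
static int
spread_reta(uint16_t port_id, uint16_t nb_rx_queues)
{
	struct rte_eth_dev_info dev_info;
	struct rte_eth_rss_reta_entry64 reta_conf[8];
	uint16_t i;

	if (nb_rx_queues == 0)
		return -EINVAL;

	rte_eth_dev_info_get(port_id, &dev_info);
	if (dev_info.reta_size == 0 ||
	    dev_info.reta_size > 8 * RTE_RETA_GROUP_SIZE)
		return -ENOTSUP;

	memset(reta_conf, 0, sizeof(reta_conf));
	for (i = 0; i < dev_info.reta_size; i++) {
		reta_conf[i / RTE_RETA_GROUP_SIZE].mask = UINT64_MAX;
		reta_conf[i / RTE_RETA_GROUP_SIZE].reta[i % RTE_RETA_GROUP_SIZE] =
			i % nb_rx_queues;
	}

	return rte_eth_dev_rss_reta_update(port_id, reta_conf,
					   dev_info.reta_size);
}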
@@ -3857,7 +2769,7 @@ int rte_eth_dev_rss_reta_query(uint16_t port,
* MAC address, and the packet is routed to all VFs for which the RX mode is
* accept packets that match the unicast hash table.
*
- * @param port
+ * @param port_id
* The port identifier of the Ethernet device.
* @param addr
* Unicast MAC address.
@@ -3868,9 +2780,10 @@ int rte_eth_dev_rss_reta_query(uint16_t port,
* - (0) if successful.
* - (-ENOTSUP) if hardware doesn't support.
* - (-ENODEV) if *port_id* invalid.
+ * - (-EIO) if device is removed.
* - (-EINVAL) if bad parameter.
*/
-int rte_eth_dev_uc_hash_table_set(uint16_t port, struct ether_addr *addr,
+int rte_eth_dev_uc_hash_table_set(uint16_t port_id, struct ether_addr *addr,
uint8_t on);
/**
@@ -3878,7 +2791,7 @@ int rte_eth_dev_uc_hash_table_set(uint16_t port, struct ether_addr *addr,
 * Ethernet MAC addresses, the packet is routed to all VFs for which the RX
* mode is accept packets that match the unicast hash table.
*
- * @param port
+ * @param port_id
* The port identifier of the Ethernet device.
* @param on
* 1 - Set all unicast hash bitmaps for receiving all the Ethernet
@@ -3888,9 +2801,10 @@ int rte_eth_dev_uc_hash_table_set(uint16_t port, struct ether_addr *addr,
* - (0) if successful.
* - (-ENOTSUP) if hardware doesn't support.
* - (-ENODEV) if *port_id* invalid.
+ * - (-EIO) if device is removed.
* - (-EINVAL) if bad parameter.
*/
-int rte_eth_dev_uc_all_hash_table_set(uint16_t port, uint8_t on);
+int rte_eth_dev_uc_all_hash_table_set(uint16_t port_id, uint8_t on);
/**
* Set a traffic mirroring rule on an Ethernet device
@@ -3911,6 +2825,7 @@ int rte_eth_dev_uc_all_hash_table_set(uint16_t port, uint8_t on);
* - (0) if successful.
* - (-ENOTSUP) if hardware doesn't support this feature.
* - (-ENODEV) if *port_id* invalid.
+ * - (-EIO) if device is removed.
* - (-EINVAL) if the mr_conf information is not correct.
*/
int rte_eth_mirror_rule_set(uint16_t port_id,
@@ -3929,6 +2844,7 @@ int rte_eth_mirror_rule_set(uint16_t port_id,
* - (0) if successful.
* - (-ENOTSUP) if hardware doesn't support this feature.
* - (-ENODEV) if *port_id* invalid.
+ * - (-EIO) if device is removed.
* - (-EINVAL) if bad parameter.
*/
int rte_eth_mirror_rule_reset(uint16_t port_id,
@@ -3947,6 +2863,7 @@ int rte_eth_mirror_rule_reset(uint16_t port_id,
* - (0) if successful.
* - (-ENOTSUP) if hardware doesn't support this feature.
* - (-ENODEV) if *port_id* invalid.
+ * - (-EIO) if device is removed.
* - (-EINVAL) if bad parameter.
*/
int rte_eth_set_queue_rate_limit(uint16_t port_id, uint16_t queue_idx,
@@ -3962,6 +2879,7 @@ int rte_eth_set_queue_rate_limit(uint16_t port_id, uint16_t queue_idx,
* @return
* - (0) if successful.
* - (-ENODEV) if port identifier is invalid.
+ * - (-EIO) if device is removed.
* - (-ENOTSUP) if hardware doesn't support.
* - (-EINVAL) if bad parameter.
*/
@@ -3979,6 +2897,7 @@ int rte_eth_dev_rss_hash_update(uint16_t port_id,
* @return
* - (0) if successful.
* - (-ENODEV) if port identifier is invalid.
+ * - (-EIO) if device is removed.
* - (-ENOTSUP) if hardware doesn't support RSS.
*/
int
@@ -4000,6 +2919,7 @@ rte_eth_dev_rss_hash_conf_get(uint16_t port_id,
* @return
* - (0) if successful.
* - (-ENODEV) if port identifier is invalid.
+ * - (-EIO) if device is removed.
* - (-ENOTSUP) if hardware doesn't support tunnel type.
*/
int
@@ -4022,6 +2942,7 @@ rte_eth_dev_udp_tunnel_port_add(uint16_t port_id,
* @return
* - (0) if successful.
* - (-ENODEV) if port identifier is invalid.
+ * - (-EIO) if device is removed.
* - (-ENOTSUP) if hardware doesn't support tunnel type.
*/
int
@@ -4040,6 +2961,7 @@ rte_eth_dev_udp_tunnel_port_delete(uint16_t port_id,
* - (0) if successful.
* - (-ENOTSUP) if hardware doesn't support this filter type.
* - (-ENODEV) if *port_id* invalid.
+ * - (-EIO) if device is removed.
*/
int rte_eth_dev_filter_supported(uint16_t port_id,
enum rte_filter_type filter_type);
@@ -4060,6 +2982,7 @@ int rte_eth_dev_filter_supported(uint16_t port_id,
* - (0) if successful.
* - (-ENOTSUP) if hardware doesn't support.
* - (-ENODEV) if *port_id* invalid.
+ * - (-EIO) if device is removed.
* - others depends on the specific operations implementation.
*/
int rte_eth_dev_filter_ctrl(uint16_t port_id, enum rte_filter_type filter_type,
@@ -4075,6 +2998,7 @@ int rte_eth_dev_filter_ctrl(uint16_t port_id, enum rte_filter_type filter_type,
* @return
* - (0) if successful.
* - (-ENODEV) if port identifier is invalid.
+ * - (-EIO) if device is removed.
* - (-ENOTSUP) if hardware doesn't support.
*/
int rte_eth_dev_get_dcb_info(uint16_t port_id,
@@ -4162,6 +3086,8 @@ void *rte_eth_add_first_rx_callback(uint16_t port_id, uint16_t queue_id,
void *rte_eth_add_tx_callback(uint16_t port_id, uint16_t queue_id,
rte_tx_callback_fn fn, void *user_param);
+struct rte_eth_rxtx_callback;
+
/**
* Remove an RX packet callback from a given port and queue.
*
@@ -4282,6 +3208,7 @@ int rte_eth_tx_queue_info_get(uint16_t port_id, uint16_t queue_id,
* - (0) if successful.
* - (-ENOTSUP) if hardware doesn't support.
* - (-ENODEV) if *port_id* invalid.
+ * - (-EIO) if device is removed.
* - others depends on the specific operations implementation.
*/
int rte_eth_dev_get_reg_info(uint16_t port_id, struct rte_dev_reg_info *info);
@@ -4295,6 +3222,7 @@ int rte_eth_dev_get_reg_info(uint16_t port_id, struct rte_dev_reg_info *info);
* - (>=0) EEPROM size if successful.
* - (-ENOTSUP) if hardware doesn't support.
* - (-ENODEV) if *port_id* invalid.
+ * - (-EIO) if device is removed.
* - others depends on the specific operations implementation.
*/
int rte_eth_dev_get_eeprom_length(uint16_t port_id);
@@ -4311,6 +3239,7 @@ int rte_eth_dev_get_eeprom_length(uint16_t port_id);
* - (0) if successful.
* - (-ENOTSUP) if hardware doesn't support.
* - (-ENODEV) if *port_id* invalid.
+ * - (-EIO) if device is removed.
* - others depends on the specific operations implementation.
*/
int rte_eth_dev_get_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info);
@@ -4327,6 +3256,7 @@ int rte_eth_dev_get_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info);
* - (0) if successful.
* - (-ENOTSUP) if hardware doesn't support.
* - (-ENODEV) if *port_id* invalid.
+ * - (-EIO) if device is removed.
* - others depends on the specific operations implementation.
*/
int rte_eth_dev_set_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info);
@@ -4345,6 +3275,7 @@ int rte_eth_dev_set_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info);
* @return
* - (0) if successful.
* - (-ENODEV) if *port_id* invalid.
+ * - (-EIO) if device is removed.
* - (-ENOTSUP) if PMD of *port_id* doesn't support multicast filtering.
* - (-ENOSPC) if *port_id* has not enough multicast filtering resources.
*/
@@ -4361,6 +3292,7 @@ int rte_eth_dev_set_mc_addr_list(uint16_t port_id,
* @return
* - 0: Success.
* - -ENODEV: The port ID is invalid.
+ * - -EIO: if device is removed.
* - -ENOTSUP: The function is not supported by the Ethernet driver.
*/
int rte_eth_timesync_enable(uint16_t port_id);
@@ -4374,6 +3306,7 @@ int rte_eth_timesync_enable(uint16_t port_id);
* @return
* - 0: Success.
* - -ENODEV: The port ID is invalid.
+ * - -EIO: if device is removed.
* - -ENOTSUP: The function is not supported by the Ethernet driver.
*/
int rte_eth_timesync_disable(uint16_t port_id);
@@ -4393,6 +3326,7 @@ int rte_eth_timesync_disable(uint16_t port_id);
* - 0: Success.
* - -EINVAL: No timestamp is available.
* - -ENODEV: The port ID is invalid.
+ * - -EIO: if device is removed.
* - -ENOTSUP: The function is not supported by the Ethernet driver.
*/
int rte_eth_timesync_read_rx_timestamp(uint16_t port_id,
@@ -4410,6 +3344,7 @@ int rte_eth_timesync_read_rx_timestamp(uint16_t port_id,
* - 0: Success.
* - -EINVAL: No timestamp is available.
* - -ENODEV: The port ID is invalid.
+ * - -EIO: if device is removed.
* - -ENOTSUP: The function is not supported by the Ethernet driver.
*/
int rte_eth_timesync_read_tx_timestamp(uint16_t port_id,
@@ -4429,6 +3364,7 @@ int rte_eth_timesync_read_tx_timestamp(uint16_t port_id,
* @return
* - 0: Success.
* - -ENODEV: The port ID is invalid.
+ * - -EIO: if device is removed.
* - -ENOTSUP: The function is not supported by the Ethernet driver.
*/
int rte_eth_timesync_adjust_time(uint16_t port_id, int64_t delta);
@@ -4464,35 +3400,12 @@ int rte_eth_timesync_read_time(uint16_t port_id, struct timespec *time);
* - 0: Success.
* - -EINVAL: No timestamp is available.
* - -ENODEV: The port ID is invalid.
+ * - -EIO: if device is removed.
* - -ENOTSUP: The function is not supported by the Ethernet driver.
*/
int rte_eth_timesync_write_time(uint16_t port_id, const struct timespec *time);
/**
- * Create memzone for HW rings.
- * malloc can't be used as the physical address is needed.
- * If the memzone is already created, then this function returns a ptr
- * to the old one.
- *
- * @param eth_dev
- * The *eth_dev* pointer is the address of the *rte_eth_dev* structure
- * @param name
- * The name of the memory zone
- * @param queue_id
- * The index of the queue to add to name
- * @param size
- * The sizeof of the memory area
- * @param align
- * Alignment for resulting memzone. Must be a power of 2.
- * @param socket_id
- * The *socket_id* argument is the socket identifier in case of NUMA.
- */
-const struct rte_memzone *
-rte_eth_dma_zone_reserve(const struct rte_eth_dev *eth_dev, const char *name,
- uint16_t queue_id, size_t size,
- unsigned align, int socket_id);
-
-/**
* Config l2 tunnel ether type of an Ethernet device for filtering specific
* tunnel packets by ether type.
*
@@ -4504,6 +3417,7 @@ rte_eth_dma_zone_reserve(const struct rte_eth_dev *eth_dev, const char *name,
* @return
* - (0) if successful.
* - (-ENODEV) if port identifier is invalid.
+ * - (-EIO) if device is removed.
* - (-ENOTSUP) if hardware doesn't support tunnel type.
*/
int
@@ -4531,6 +3445,7 @@ rte_eth_dev_l2_tunnel_eth_type_conf(uint16_t port_id,
* @return
* - (0) if successful.
* - (-ENODEV) if port identifier is invalid.
+ * - (-EIO) if device is removed.
* - (-ENOTSUP) if hardware doesn't support tunnel type.
*/
int
@@ -4588,7 +3503,6 @@ int rte_eth_dev_adjust_nb_rx_tx_desc(uint16_t port_id,
uint16_t *nb_rx_desc,
uint16_t *nb_tx_desc);
-
/**
* Test if a port supports specific mempool ops.
*
@@ -4606,6 +3520,591 @@ int rte_eth_dev_adjust_nb_rx_tx_desc(uint16_t port_id,
int
rte_eth_dev_pool_ops_supported(uint16_t port_id, const char *pool);
+/**
+ * Get the security context for the Ethernet device.
+ *
+ * @param port_id
+ * Port identifier of the Ethernet device
+ * @return
+ * - NULL on error.
+ * - pointer to security context on success.
+ */
+void *
+rte_eth_dev_get_sec_ctx(uint8_t port_id);
+
+
+#include <rte_ethdev_core.h>
+
+/**
+ *
+ * Retrieve a burst of input packets from a receive queue of an Ethernet
+ * device. The retrieved packets are stored in *rte_mbuf* structures whose
+ * pointers are supplied in the *rx_pkts* array.
+ *
+ * The rte_eth_rx_burst() function loops, parsing the RX ring of the
+ * receive queue, up to *nb_pkts* packets, and for each completed RX
+ * descriptor in the ring, it performs the following operations:
+ *
+ * - Initialize the *rte_mbuf* data structure associated with the
+ * RX descriptor according to the information provided by the NIC into
+ * that RX descriptor.
+ *
+ * - Store the *rte_mbuf* data structure into the next entry of the
+ * *rx_pkts* array.
+ *
+ * - Replenish the RX descriptor with a new *rte_mbuf* buffer
+ * allocated from the memory pool associated with the receive queue at
+ * initialization time.
+ *
+ * When retrieving an input packet that was scattered by the controller
+ * into multiple receive descriptors, the rte_eth_rx_burst() function
+ * appends the associated *rte_mbuf* buffers to the first buffer of the
+ * packet.
+ *
+ * The rte_eth_rx_burst() function returns the number of packets
+ * actually retrieved, which is the number of *rte_mbuf* data structures
+ * effectively supplied into the *rx_pkts* array.
+ * A return value equal to *nb_pkts* indicates that the RX queue contained
+ * at least *nb_pkts* packets, and this is likely to signify that other
+ * received packets remain in the input queue. Applications implementing
+ * a "retrieve as many received packets as possible" policy can check this
+ * specific case and keep invoking the rte_eth_rx_burst() function until
+ * a value less than *nb_pkts* is returned.
+ *
+ * This receive method has the following advantages:
+ *
+ * - It allows a run-to-completion network stack engine to retrieve and
+ * to immediately process received packets in a fast burst-oriented
+ * approach, avoiding the overhead of unnecessary intermediate packet
+ * queue/dequeue operations.
+ *
+ * - Conversely, it also allows an asynchronous-oriented processing
+ * method to retrieve bursts of received packets and to immediately
+ * queue them for further parallel processing by another logical core,
+ * for instance. However, instead of having received packets being
+ * individually queued by the driver, this approach allows the caller
+ * of the rte_eth_rx_burst() function to queue a burst of retrieved
+ * packets at a time and therefore dramatically reduce the cost of
+ * enqueue/dequeue operations per packet.
+ *
+ * - It allows the rte_eth_rx_burst() function of the driver to take
+ * advantage of burst-oriented hardware features (CPU cache,
+ * prefetch instructions, and so on) to minimize the number of CPU
+ * cycles per packet.
+ *
+ * To summarize, the proposed receive API enables many
+ * burst-oriented optimizations in both synchronous and asynchronous
+ * packet processing environments with no overhead in both cases.
+ *
+ * The rte_eth_rx_burst() function does not provide any error
+ * notification to avoid the corresponding overhead. As a hint, the
+ * upper-level application might check the status of the device link when it
+ * has been systematically returned a 0 value for a given number of tries.
+ *
+ * @param port_id
+ * The port identifier of the Ethernet device.
+ * @param queue_id
+ * The index of the receive queue from which to retrieve input packets.
+ * The value must be in the range [0, nb_rx_queue - 1] previously supplied
+ * to rte_eth_dev_configure().
+ * @param rx_pkts
+ * The address of an array of pointers to *rte_mbuf* structures that
+ * must be large enough to store *nb_pkts* pointers in it.
+ * @param nb_pkts
+ * The maximum number of packets to retrieve.
+ * @return
+ * The number of packets actually retrieved, which is the number
+ * of pointers to *rte_mbuf* structures effectively supplied to the
+ * *rx_pkts* array.
+ */
+static inline uint16_t
+rte_eth_rx_burst(uint16_t port_id, uint16_t queue_id,
+ struct rte_mbuf **rx_pkts, const uint16_t nb_pkts)
+{
+ struct rte_eth_dev *dev = &rte_eth_devices[port_id];
+
+#ifdef RTE_LIBRTE_ETHDEV_DEBUG
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, 0);
+ RTE_FUNC_PTR_OR_ERR_RET(*dev->rx_pkt_burst, 0);
+
+ if (queue_id >= dev->data->nb_rx_queues) {
+ RTE_PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", queue_id);
+ return 0;
+ }
+#endif
+ int16_t nb_rx = (*dev->rx_pkt_burst)(dev->data->rx_queues[queue_id],
+ rx_pkts, nb_pkts);
+
+#ifdef RTE_ETHDEV_RXTX_CALLBACKS
+ struct rte_eth_rxtx_callback *cb = dev->post_rx_burst_cbs[queue_id];
+
+ if (unlikely(cb != NULL)) {
+ do {
+ nb_rx = cb->fn.rx(port_id, queue_id, rx_pkts, nb_rx,
+ nb_pkts, cb->param);
+ cb = cb->next;
+ } while (cb != NULL);
+ }
+#endif
+
+ return nb_rx;
+}
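As an editorial illustration of the polling pattern described above, a minimal sketch; BURST_SIZE, the helper name drain_rx_queue and the drop-everything policy are assumptions, not part of this patch:

#include <rte_ethdev.h>
#include <rte_mbuf.h>

#define BURST_SIZE 32	/* assumption: a typical burst size */

/* Sketch: drain one Rx queue and free everything received. */
static void
drain_rx_queue(uint16_t port_id, uint16_t queue_id)
{
	struct rte_mbuf *pkts[BURST_SIZE];
	uint16_t nb_rx, i;

	do {
		nb_rx = rte_eth_rx_burst(port_id, queue_id, pkts, BURST_SIZE);
		for (i = 0; i < nb_rx; i++)
			rte_pktmbuf_free(pkts[i]);
	} while (nb_rx == BURST_SIZE);	/* queue likely still non-empty */
}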
+
+/**
+ * Get the number of used descriptors of a rx queue
+ *
+ * @param port_id
+ * The port identifier of the Ethernet device.
+ * @param queue_id
+ * The queue id on the specific port.
+ * @return
+ * The number of used descriptors in the specific queue, or:
+ * (-EINVAL) if *port_id* or *queue_id* is invalid
+ * (-ENOTSUP) if the device does not support this function
+ */
+static inline int
+rte_eth_rx_queue_count(uint16_t port_id, uint16_t queue_id)
+{
+ struct rte_eth_dev *dev;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
+ dev = &rte_eth_devices[port_id];
+ RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_count, -ENOTSUP);
+ if (queue_id >= dev->data->nb_rx_queues)
+ return -EINVAL;
+
+ return (*dev->dev_ops->rx_queue_count)(dev, queue_id);
+}
+
+/**
+ * Check if the DD bit of the specific RX descriptor in the queue has been set
+ *
+ * @param port_id
+ * The port identifier of the Ethernet device.
+ * @param queue_id
+ * The queue id on the specific port.
+ * @param offset
+ * The offset of the descriptor ID from tail.
+ * @return
+ * - (1) if the specific DD bit is set.
+ * - (0) if the specific DD bit is not set.
+ * - (-ENODEV) if *port_id* invalid.
+ * - (-ENOTSUP) if the device does not support this function
+ */
+static inline int
+rte_eth_rx_descriptor_done(uint16_t port_id, uint16_t queue_id, uint16_t offset)
+{
+ struct rte_eth_dev *dev = &rte_eth_devices[port_id];
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
+ RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_descriptor_done, -ENOTSUP);
+ return (*dev->dev_ops->rx_descriptor_done)( \
+ dev->data->rx_queues[queue_id], offset);
+}
+
+#define RTE_ETH_RX_DESC_AVAIL 0 /**< Desc available for hw. */
+#define RTE_ETH_RX_DESC_DONE 1 /**< Desc done, filled by hw. */
+#define RTE_ETH_RX_DESC_UNAVAIL 2 /**< Desc used by driver or hw. */
+
+/**
+ * Check the status of a Rx descriptor in the queue
+ *
+ * It should be called in a similar context to the Rx function:
+ * - on a dataplane core
+ * - not concurrently on the same queue
+ *
+ * Since it's a dataplane function, no check is performed on port_id and
+ * queue_id. The caller must therefore ensure that the port is enabled
+ * and the queue is configured and running.
+ *
+ * Note: accessing a random descriptor in the ring may trigger cache
+ * misses and have a performance impact.
+ *
+ * @param port_id
+ * A valid port identifier of the Ethernet device.
+ * @param queue_id
+ * A valid Rx queue identifier on this port.
+ * @param offset
+ * The offset of the descriptor starting from tail (0 is the next
+ * packet to be received by the driver).
+ *
+ * @return
+ * - (RTE_ETH_RX_DESC_AVAIL): Descriptor is available for the hardware to
+ * receive a packet.
+ * - (RTE_ETH_RX_DESC_DONE): Descriptor is done, it is filled by hw, but
+ * not yet processed by the driver (i.e. in the receive queue).
+ * - (RTE_ETH_RX_DESC_UNAVAIL): Descriptor is unavailable, either held by
+ * the driver and not yet returned to hw, or reserved by the hw.
+ * - (-EINVAL) bad descriptor offset.
+ * - (-ENOTSUP) if the device does not support this function.
+ * - (-ENODEV) bad port or queue (only if compiled with debug).
+ */
+static inline int
+rte_eth_rx_descriptor_status(uint16_t port_id, uint16_t queue_id,
+ uint16_t offset)
+{
+ struct rte_eth_dev *dev;
+ void *rxq;
+
+#ifdef RTE_LIBRTE_ETHDEV_DEBUG
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
+#endif
+ dev = &rte_eth_devices[port_id];
+#ifdef RTE_LIBRTE_ETHDEV_DEBUG
+ if (queue_id >= dev->data->nb_rx_queues)
+ return -ENODEV;
+#endif
+ RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_descriptor_status, -ENOTSUP);
+ rxq = dev->data->rx_queues[queue_id];
+
+ return (*dev->dev_ops->rx_descriptor_status)(rxq, offset);
+}
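A minimal sketch of using the descriptor-status call to gauge the Rx backlog, falling back to the queue counter; the helper name rx_backlog_reached is an assumption, not part of this patch:

#include <errno.h>
#include <rte_ethdev.h>

/* Sketch: returns 1 when the descriptor "offset" entries past the tail has
 * already been filled by hardware, i.e. at least offset + 1 packets wait in
 * the Rx ring; falls back to the queue counter when unsupported.
 */
static int
rx_backlog_reached(uint16_t port_id, uint16_t queue_id, uint16_t offset)
{
	int status = rte_eth_rx_descriptor_status(port_id, queue_id, offset);

	if (status == -ENOTSUP)
		return rte_eth_rx_queue_count(port_id, queue_id) > offset;

	return status == RTE_ETH_RX_DESC_DONE;
}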
+
+#define RTE_ETH_TX_DESC_FULL 0 /**< Desc filled for hw, waiting xmit. */
+#define RTE_ETH_TX_DESC_DONE 1 /**< Desc done, packet is transmitted. */
+#define RTE_ETH_TX_DESC_UNAVAIL 2 /**< Desc used by driver or hw. */
+
+/**
+ * Check the status of a Tx descriptor in the queue.
+ *
+ * It should be called in a similar context to the Tx function:
+ * - on a dataplane core
+ * - not concurrently on the same queue
+ *
+ * Since it's a dataplane function, no check is performed on port_id and
+ * queue_id. The caller must therefore ensure that the port is enabled
+ * and the queue is configured and running.
+ *
+ * Note: accessing a random descriptor in the ring may trigger cache
+ * misses and have a performance impact.
+ *
+ * @param port_id
+ * A valid port identifier of the Ethernet device.
+ * @param queue_id
+ * A valid Tx queue identifier on this port.
+ * @param offset
+ * The offset of the descriptor starting from tail (0 is the place where
+ * the next packet will be sent).
+ *
+ * @return
+ * - (RTE_ETH_TX_DESC_FULL) Descriptor is being processed by the hw, i.e.
+ * in the transmit queue.
+ * - (RTE_ETH_TX_DESC_DONE) Hardware is done with this descriptor, it can
+ * be reused by the driver.
+ * - (RTE_ETH_TX_DESC_UNAVAIL): Descriptor is unavailable, reserved by the
+ * driver or the hardware.
+ * - (-EINVAL) bad descriptor offset.
+ * - (-ENOTSUP) if the device does not support this function.
+ * - (-ENODEV) bad port or queue (only if compiled with debug).
+ */
+static inline int rte_eth_tx_descriptor_status(uint16_t port_id,
+ uint16_t queue_id, uint16_t offset)
+{
+ struct rte_eth_dev *dev;
+ void *txq;
+
+#ifdef RTE_LIBRTE_ETHDEV_DEBUG
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
+#endif
+ dev = &rte_eth_devices[port_id];
+#ifdef RTE_LIBRTE_ETHDEV_DEBUG
+ if (queue_id >= dev->data->nb_tx_queues)
+ return -ENODEV;
+#endif
+ RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_descriptor_status, -ENOTSUP);
+ txq = dev->data->tx_queues[queue_id];
+
+ return (*dev->dev_ops->tx_descriptor_status)(txq, offset);
+}
+
+/**
+ * Send a burst of output packets on a transmit queue of an Ethernet device.
+ *
+ * The rte_eth_tx_burst() function is invoked to transmit output packets
+ * on the output queue *queue_id* of the Ethernet device designated by its
+ * *port_id*.
+ * The *nb_pkts* parameter is the number of packets to send which are
+ * supplied in the *tx_pkts* array of *rte_mbuf* structures, each of them
+ * allocated from a pool created with rte_pktmbuf_pool_create().
+ * The rte_eth_tx_burst() function loops, sending *nb_pkts* packets,
+ * up to the number of transmit descriptors available in the TX ring of the
+ * transmit queue.
+ * For each packet to send, the rte_eth_tx_burst() function performs
+ * the following operations:
+ *
+ * - Pick up the next available descriptor in the transmit ring.
+ *
+ * - Free the network buffer previously sent with that descriptor, if any.
+ *
+ * - Initialize the transmit descriptor with the information provided
+ * in the *rte_mbuf data structure.
+ *
+ * In the case of a segmented packet composed of a list of *rte_mbuf* buffers,
+ * the rte_eth_tx_burst() function uses several transmit descriptors
+ * of the ring.
+ *
+ * The rte_eth_tx_burst() function returns the number of packets it
+ * actually sent. A return value equal to *nb_pkts* means that all packets
+ * have been sent, and this is likely to signify that other output packets
+ * could be immediately transmitted again. Applications that implement a
+ * "send as many packets to transmit as possible" policy can check this
+ * specific case and keep invoking the rte_eth_tx_burst() function until
+ * a value less than *nb_pkts* is returned.
+ *
+ * It is the responsibility of the rte_eth_tx_burst() function to
+ * transparently free the memory buffers of packets previously sent.
+ * This feature is driven by the *tx_free_thresh* value supplied to the
+ * rte_eth_dev_configure() function at device configuration time.
+ * When the number of free TX descriptors drops below this threshold, the
+ * rte_eth_tx_burst() function must [attempt to] free the *rte_mbuf* buffers
+ * of those packets whose transmission was effectively completed.
+ *
+ * If the PMD is DEV_TX_OFFLOAD_MT_LOCKFREE capable, multiple threads can
+ * invoke this function concurrently on the same tx queue without SW lock.
+ * @see rte_eth_dev_info_get, struct rte_eth_txconf::txq_flags
+ *
+ * @param port_id
+ * The port identifier of the Ethernet device.
+ * @param queue_id
+ * The index of the transmit queue through which output packets must be
+ * sent.
+ * The value must be in the range [0, nb_tx_queue - 1] previously supplied
+ * to rte_eth_dev_configure().
+ * @param tx_pkts
+ * The address of an array of *nb_pkts* pointers to *rte_mbuf* structures
+ * which contain the output packets.
+ * @param nb_pkts
+ * The maximum number of packets to transmit.
+ * @return
+ * The number of output packets actually stored in transmit descriptors of
+ * the transmit ring. The return value can be less than the value of the
+ * *nb_pkts* parameter when the transmit ring is full or has been filled up.
+ */
+static inline uint16_t
+rte_eth_tx_burst(uint16_t port_id, uint16_t queue_id,
+ struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
+{
+ struct rte_eth_dev *dev = &rte_eth_devices[port_id];
+
+#ifdef RTE_LIBRTE_ETHDEV_DEBUG
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, 0);
+ RTE_FUNC_PTR_OR_ERR_RET(*dev->tx_pkt_burst, 0);
+
+ if (queue_id >= dev->data->nb_tx_queues) {
+ RTE_PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", queue_id);
+ return 0;
+ }
+#endif
+
+#ifdef RTE_ETHDEV_RXTX_CALLBACKS
+ struct rte_eth_rxtx_callback *cb = dev->pre_tx_burst_cbs[queue_id];
+
+ if (unlikely(cb != NULL)) {
+ do {
+ nb_pkts = cb->fn.tx(port_id, queue_id, tx_pkts, nb_pkts,
+ cb->param);
+ cb = cb->next;
+ } while (cb != NULL);
+ }
+#endif
+
+ return (*dev->tx_pkt_burst)(dev->data->tx_queues[queue_id], tx_pkts, nb_pkts);
+}
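As an editorial illustration of the "keep invoking until everything is accepted" policy described above, a minimal sketch; the unbounded retry and the helper name send_all are assumptions, not part of this patch:

#include <rte_ethdev.h>
#include <rte_mbuf.h>

/* Sketch: transmit a burst, retrying until every packet is accepted.
 * A real application would bound the retries and free any leftovers.
 */
static void
send_all(uint16_t port_id, uint16_t queue_id,
	 struct rte_mbuf **pkts, uint16_t nb_pkts)
{
	uint16_t sent = 0;

	while (sent < nb_pkts)
		sent += rte_eth_tx_burst(port_id, queue_id,
					 pkts + sent, nb_pkts - sent);
}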
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice
+ *
+ * Process a burst of output packets on a transmit queue of an Ethernet device.
+ *
+ * The rte_eth_tx_prepare() function is invoked to prepare output packets to be
+ * transmitted on the output queue *queue_id* of the Ethernet device designated
+ * by its *port_id*.
+ * The *nb_pkts* parameter is the number of packets to be prepared which are
+ * supplied in the *tx_pkts* array of *rte_mbuf* structures, each of them
+ * allocated from a pool created with rte_pktmbuf_pool_create().
+ * For each packet to send, the rte_eth_tx_prepare() function performs
+ * the following operations:
+ *
+ * - Check if the packet meets the device's requirements for Tx offloads.
+ *
+ * - Check limitations on the number of segments.
+ *
+ * - Check additional requirements when debug is enabled.
+ *
+ * - Update and/or reset required checksums when Tx offload is set for the packet.
+ *
+ * Since this function can modify packet data, the provided mbufs must be safely
+ * writable (e.g. the modified data cannot be in a shared segment).
+ *
+ * The rte_eth_tx_prepare() function returns the number of packets ready to be
+ * sent. A return value equal to *nb_pkts* means that all packets are valid and
+ * ready to be sent, otherwise processing stops at the first invalid packet and
+ * the remaining packets are left untouched.
+ *
+ * When this functionality is not implemented in the driver, all packets are
+ * returned untouched.
+ *
+ * @param port_id
+ * The port identifier of the Ethernet device.
+ * The value must be a valid port id.
+ * @param queue_id
+ * The index of the transmit queue through which output packets must be
+ * sent.
+ * The value must be in the range [0, nb_tx_queue - 1] previously supplied
+ * to rte_eth_dev_configure().
+ * @param tx_pkts
+ * The address of an array of *nb_pkts* pointers to *rte_mbuf* structures
+ * which contain the output packets.
+ * @param nb_pkts
+ * The maximum number of packets to process.
+ * @return
+ * The number of packets correct and ready to be sent. The return value can be
+ * less than the value of the *nb_pkts* parameter when some packet does not
+ * meet the device's requirements, with rte_errno set appropriately:
+ * - -EINVAL: offload flags are not correctly set
+ * - -ENOTSUP: the offload feature is not supported by the hardware
+ *
+ */
+
+#ifndef RTE_ETHDEV_TX_PREPARE_NOOP
+
+static inline uint16_t
+rte_eth_tx_prepare(uint16_t port_id, uint16_t queue_id,
+ struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
+{
+ struct rte_eth_dev *dev;
+
+#ifdef RTE_LIBRTE_ETHDEV_DEBUG
+ if (!rte_eth_dev_is_valid_port(port_id)) {
+ RTE_PMD_DEBUG_TRACE("Invalid TX port_id=%d\n", port_id);
+ rte_errno = -EINVAL;
+ return 0;
+ }
+#endif
+
+ dev = &rte_eth_devices[port_id];
+
+#ifdef RTE_LIBRTE_ETHDEV_DEBUG
+ if (queue_id >= dev->data->nb_tx_queues) {
+ RTE_PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", queue_id);
+ rte_errno = -EINVAL;
+ return 0;
+ }
+#endif
+
+ if (!dev->tx_pkt_prepare)
+ return nb_pkts;
+
+ return (*dev->tx_pkt_prepare)(dev->data->tx_queues[queue_id],
+ tx_pkts, nb_pkts);
+}
+
+#else
+
+/*
+ * Native NOOP operation for compilation targets that do not require any
+ * preparation steps, and where a functional NOOP would introduce an
+ * unnecessary performance drop.
+ *
+ * Generally it is not a good idea to turn this on globally, and it should not
+ * be used if the behavior of tx_prepare can change.
+ */
+
+static inline uint16_t
+rte_eth_tx_prepare(__rte_unused uint16_t port_id,
+ __rte_unused uint16_t queue_id,
+ __rte_unused struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
+{
+ return nb_pkts;
+}
+
+#endif
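
For reference (not part of the patch), a sketch of the intended prepare-then-send flow; the helper name is illustrative and the port and queue are assumed to be configured:

#include <stdio.h>
#include <rte_errno.h>
#include <rte_ethdev.h>

static uint16_t
example_prepare_and_send(uint16_t port_id, uint16_t queue_id,
			 struct rte_mbuf **pkts, uint16_t nb_pkts)
{
	/* Validate offload flags and fix up checksums before transmission. */
	uint16_t nb_prep = rte_eth_tx_prepare(port_id, queue_id, pkts, nb_pkts);

	if (nb_prep != nb_pkts)
		/* pkts[nb_prep] is the first invalid packet; rte_errno says why. */
		printf("tx_prepare stopped at %u, rte_errno=%d\n",
		       nb_prep, rte_errno);

	/* Only the validated prefix is handed to the driver. */
	return rte_eth_tx_burst(port_id, queue_id, pkts, nb_prep);
}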
+
+/**
+ * Send any packets queued up for transmission on a port and HW queue
+ *
+ * This causes an explicit flush of packets previously buffered via the
+ * rte_eth_tx_buffer() function. It returns the number of packets successfully
+ * sent to the NIC, and calls the error callback for any unsent packets. Unless
+ * explicitly set up otherwise, the default callback simply frees the unsent
+ * packets back to the owning mempool.
+ *
+ * @param port_id
+ * The port identifier of the Ethernet device.
+ * @param queue_id
+ * The index of the transmit queue through which output packets must be
+ * sent.
+ * The value must be in the range [0, nb_tx_queue - 1] previously supplied
+ * to rte_eth_dev_configure().
+ * @param buffer
+ * Buffer of packets to be transmitted.
+ * @return
+ * The number of packets successfully sent to the Ethernet device. The error
+ * callback is called for any packets which could not be sent.
+ */
+static inline uint16_t
+rte_eth_tx_buffer_flush(uint16_t port_id, uint16_t queue_id,
+ struct rte_eth_dev_tx_buffer *buffer)
+{
+ uint16_t sent;
+ uint16_t to_send = buffer->length;
+
+ if (to_send == 0)
+ return 0;
+
+ sent = rte_eth_tx_burst(port_id, queue_id, buffer->pkts, to_send);
+
+ buffer->length = 0;
+
+ /* All packets sent, or to be dealt with by callback below */
+ if (unlikely(sent != to_send))
+ buffer->error_callback(&buffer->pkts[sent], to_send - sent,
+ buffer->error_userdata);
+
+ return sent;
+}
+
+/**
+ * Buffer a single packet for future transmission on a port and queue
+ *
+ * This function takes a single mbuf/packet and buffers it for later
+ * transmission on the particular port and queue specified. Once the buffer is
+ * full of packets, an attempt will be made to transmit all the buffered
+ * packets. In case of error, where not all packets can be transmitted, a
+ * callback is called with the unsent packets as a parameter. If no callback
+ * is explicitly set up, the unsent packets are just freed back to the owning
+ * mempool. The function returns the number of packets actually sent, i.e.
+ * 0 if no buffer flush occurred, otherwise the number of packets successfully
+ * flushed.
+ *
+ * @param port_id
+ * The port identifier of the Ethernet device.
+ * @param queue_id
+ * The index of the transmit queue through which output packets must be
+ * sent.
+ * The value must be in the range [0, nb_tx_queue - 1] previously supplied
+ * to rte_eth_dev_configure().
+ * @param buffer
+ * Buffer used to collect packets to be sent.
+ * @param tx_pkt
+ * Pointer to the packet mbuf to be sent.
+ * @return
+ * 0 = packet has been buffered for later transmission
+ * N > 0 = packet has been buffered, and the buffer was subsequently flushed,
+ * causing N packets to be sent, and the error callback to be called for
+ * the rest.
+ */
+static __rte_always_inline uint16_t
+rte_eth_tx_buffer(uint16_t port_id, uint16_t queue_id,
+ struct rte_eth_dev_tx_buffer *buffer, struct rte_mbuf *tx_pkt)
+{
+ buffer->pkts[buffer->length++] = tx_pkt;
+ if (buffer->length < buffer->size)
+ return 0;
+
+ return rte_eth_tx_buffer_flush(port_id, queue_id, buffer);
+}
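
As a usage note (not part of the patch), a sketch of how the buffer/flush pair above is typically driven, using the existing rte_eth_tx_buffer_init() and RTE_ETH_TX_BUFFER_SIZE() helpers; the 32-packet size and helper name are illustrative:

#include <rte_ethdev.h>
#include <rte_malloc.h>

static void
example_buffered_send(uint16_t port_id, uint16_t queue_id, struct rte_mbuf *pkt)
{
	static struct rte_eth_dev_tx_buffer *tx_buffer;

	if (tx_buffer == NULL) {
		/* Allocate space for up to 32 buffered packets. */
		tx_buffer = rte_zmalloc("example_tx_buffer",
					RTE_ETH_TX_BUFFER_SIZE(32), 0);
		if (tx_buffer == NULL)
			return; /* allocation failed */
		rte_eth_tx_buffer_init(tx_buffer, 32);
	}

	/* Packets accumulate until the buffer is full, then go out in one burst. */
	rte_eth_tx_buffer(port_id, queue_id, tx_buffer, pkt);

	/* A real application also flushes periodically (e.g. on a drain timer):
	 * rte_eth_tx_buffer_flush(port_id, queue_id, tx_buffer);
	 */
}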
+
#ifdef __cplusplus
}
#endif
diff --git a/lib/librte_ether/rte_ethdev_core.h b/lib/librte_ether/rte_ethdev_core.h
new file mode 100644
index 00000000..e5681e46
--- /dev/null
+++ b/lib/librte_ether/rte_ethdev_core.h
@@ -0,0 +1,613 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Intel Corporation
+ */
+
+#ifndef _RTE_ETHDEV_CORE_H_
+#define _RTE_ETHDEV_CORE_H_
+
+/**
+ * @file
+ *
+ * RTE Ethernet Device internal header.
+ *
+ * This header contains internal data types. But they are still part of the
+ * public API because they are used by inline functions in the published API.
+ *
+ * Applications should not use these directly.
+ *
+ */
+
+struct rte_eth_dev_callback;
+/** @internal Structure to keep track of registered callbacks */
+TAILQ_HEAD(rte_eth_dev_cb_list, rte_eth_dev_callback);
+
+/*
+ * Definitions of all functions exported by an Ethernet driver through the
+ * generic structure of type *eth_dev_ops* supplied in the *rte_eth_dev*
+ * structure associated with an Ethernet device.
+ */
+struct rte_eth_dev;
+
+typedef int (*eth_dev_configure_t)(struct rte_eth_dev *dev);
+/**< @internal Ethernet device configuration. */
+
+typedef int (*eth_dev_start_t)(struct rte_eth_dev *dev);
+/**< @internal Function used to start a configured Ethernet device. */
+
+typedef void (*eth_dev_stop_t)(struct rte_eth_dev *dev);
+/**< @internal Function used to stop a configured Ethernet device. */
+
+typedef int (*eth_dev_set_link_up_t)(struct rte_eth_dev *dev);
+/**< @internal Function used to link up a configured Ethernet device. */
+
+typedef int (*eth_dev_set_link_down_t)(struct rte_eth_dev *dev);
+/**< @internal Function used to link down a configured Ethernet device. */
+
+typedef void (*eth_dev_close_t)(struct rte_eth_dev *dev);
+/**< @internal Function used to close a configured Ethernet device. */
+
+typedef int (*eth_dev_reset_t)(struct rte_eth_dev *dev);
+/**< @internal Function used to reset a configured Ethernet device. */
+
+typedef int (*eth_is_removed_t)(struct rte_eth_dev *dev);
+/**< @internal Function used to detect an Ethernet device removal. */
+
+typedef void (*eth_promiscuous_enable_t)(struct rte_eth_dev *dev);
+/**< @internal Function used to enable the RX promiscuous mode of an Ethernet device. */
+
+typedef void (*eth_promiscuous_disable_t)(struct rte_eth_dev *dev);
+/**< @internal Function used to disable the RX promiscuous mode of an Ethernet device. */
+
+typedef void (*eth_allmulticast_enable_t)(struct rte_eth_dev *dev);
+/**< @internal Enable the receipt of all multicast packets by an Ethernet device. */
+
+typedef void (*eth_allmulticast_disable_t)(struct rte_eth_dev *dev);
+/**< @internal Disable the receipt of all multicast packets by an Ethernet device. */
+
+typedef int (*eth_link_update_t)(struct rte_eth_dev *dev,
+ int wait_to_complete);
+/**< @internal Get link speed, duplex mode and state (up/down) of an Ethernet device. */
+
+typedef int (*eth_stats_get_t)(struct rte_eth_dev *dev,
+ struct rte_eth_stats *igb_stats);
+/**< @internal Get global I/O statistics of an Ethernet device. */
+
+typedef void (*eth_stats_reset_t)(struct rte_eth_dev *dev);
+/**< @internal Reset global I/O statistics of an Ethernet device to 0. */
+
+typedef int (*eth_xstats_get_t)(struct rte_eth_dev *dev,
+ struct rte_eth_xstat *stats, unsigned n);
+/**< @internal Get extended stats of an Ethernet device. */
+
+typedef int (*eth_xstats_get_by_id_t)(struct rte_eth_dev *dev,
+ const uint64_t *ids,
+ uint64_t *values,
+ unsigned int n);
+/**< @internal Get extended stats of an Ethernet device. */
+
+typedef void (*eth_xstats_reset_t)(struct rte_eth_dev *dev);
+/**< @internal Reset extended stats of an Ethernet device. */
+
+typedef int (*eth_xstats_get_names_t)(struct rte_eth_dev *dev,
+ struct rte_eth_xstat_name *xstats_names, unsigned size);
+/**< @internal Get names of extended stats of an Ethernet device. */
+
+typedef int (*eth_xstats_get_names_by_id_t)(struct rte_eth_dev *dev,
+ struct rte_eth_xstat_name *xstats_names, const uint64_t *ids,
+ unsigned int size);
+/**< @internal Get names of extended stats of an Ethernet device. */
+
+typedef int (*eth_queue_stats_mapping_set_t)(struct rte_eth_dev *dev,
+ uint16_t queue_id,
+ uint8_t stat_idx,
+ uint8_t is_rx);
+/**< @internal Set a queue statistics mapping for a tx/rx queue of an Ethernet device. */
+
+typedef void (*eth_dev_infos_get_t)(struct rte_eth_dev *dev,
+ struct rte_eth_dev_info *dev_info);
+/**< @internal Get specific information about an Ethernet device. */
+
+typedef const uint32_t *(*eth_dev_supported_ptypes_get_t)(struct rte_eth_dev *dev);
+/**< @internal Get supported ptypes of an Ethernet device. */
+
+typedef int (*eth_queue_start_t)(struct rte_eth_dev *dev,
+ uint16_t queue_id);
+/**< @internal Start rx and tx of a queue of an Ethernet device. */
+
+typedef int (*eth_queue_stop_t)(struct rte_eth_dev *dev,
+ uint16_t queue_id);
+/**< @internal Stop rx and tx of a queue of an Ethernet device. */
+
+typedef int (*eth_rx_queue_setup_t)(struct rte_eth_dev *dev,
+ uint16_t rx_queue_id,
+ uint16_t nb_rx_desc,
+ unsigned int socket_id,
+ const struct rte_eth_rxconf *rx_conf,
+ struct rte_mempool *mb_pool);
+/**< @internal Set up a receive queue of an Ethernet device. */
+
+typedef int (*eth_tx_queue_setup_t)(struct rte_eth_dev *dev,
+ uint16_t tx_queue_id,
+ uint16_t nb_tx_desc,
+ unsigned int socket_id,
+ const struct rte_eth_txconf *tx_conf);
+/**< @internal Setup a transmit queue of an Ethernet device. */
+
+typedef int (*eth_rx_enable_intr_t)(struct rte_eth_dev *dev,
+ uint16_t rx_queue_id);
+/**< @internal Enable interrupt of a receive queue of an Ethernet device. */
+
+typedef int (*eth_rx_disable_intr_t)(struct rte_eth_dev *dev,
+ uint16_t rx_queue_id);
+/**< @internal Disable interrupt of a receive queue of an Ethernet device. */
+
+typedef void (*eth_queue_release_t)(void *queue);
+/**< @internal Release memory resources allocated by given RX/TX queue. */
+
+typedef uint32_t (*eth_rx_queue_count_t)(struct rte_eth_dev *dev,
+ uint16_t rx_queue_id);
+/**< @internal Get number of used descriptors on a receive queue. */
+
+typedef int (*eth_rx_descriptor_done_t)(void *rxq, uint16_t offset);
+/**< @internal Check DD bit of specific RX descriptor */
+
+typedef int (*eth_rx_descriptor_status_t)(void *rxq, uint16_t offset);
+/**< @internal Check the status of a Rx descriptor */
+
+typedef int (*eth_tx_descriptor_status_t)(void *txq, uint16_t offset);
+/**< @internal Check the status of a Tx descriptor */
+
+typedef int (*eth_fw_version_get_t)(struct rte_eth_dev *dev,
+ char *fw_version, size_t fw_size);
+/**< @internal Get firmware information of an Ethernet device. */
+
+typedef int (*eth_tx_done_cleanup_t)(void *txq, uint32_t free_cnt);
+/**< @internal Free mbufs already transmitted on a TX ring. */
+
+typedef void (*eth_rxq_info_get_t)(struct rte_eth_dev *dev,
+ uint16_t rx_queue_id, struct rte_eth_rxq_info *qinfo);
+
+typedef void (*eth_txq_info_get_t)(struct rte_eth_dev *dev,
+ uint16_t tx_queue_id, struct rte_eth_txq_info *qinfo);
+
+typedef int (*mtu_set_t)(struct rte_eth_dev *dev, uint16_t mtu);
+/**< @internal Set MTU. */
+
+typedef int (*vlan_filter_set_t)(struct rte_eth_dev *dev,
+ uint16_t vlan_id,
+ int on);
+/**< @internal Filtering of a VLAN Tag Identifier by an Ethernet device. */
+
+typedef int (*vlan_tpid_set_t)(struct rte_eth_dev *dev,
+ enum rte_vlan_type type, uint16_t tpid);
+/**< @internal set the outer/inner VLAN-TPID by an Ethernet device. */
+
+typedef int (*vlan_offload_set_t)(struct rte_eth_dev *dev, int mask);
+/**< @internal set VLAN offload function by an Ethernet device. */
+
+typedef int (*vlan_pvid_set_t)(struct rte_eth_dev *dev,
+ uint16_t vlan_id,
+ int on);
+/**< @internal set port based TX VLAN insertion by an Ethernet device. */
+
+typedef void (*vlan_strip_queue_set_t)(struct rte_eth_dev *dev,
+ uint16_t rx_queue_id,
+ int on);
+/**< @internal VLAN stripping enable/disable on a queue of an Ethernet device. */
+
+typedef uint16_t (*eth_rx_burst_t)(void *rxq,
+ struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts);
+/**< @internal Retrieve input packets from a receive queue of an Ethernet device. */
+
+typedef uint16_t (*eth_tx_burst_t)(void *txq,
+ struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts);
+/**< @internal Send output packets on a transmit queue of an Ethernet device. */
+
+typedef uint16_t (*eth_tx_prep_t)(void *txq,
+ struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts);
+/**< @internal Prepare output packets on a transmit queue of an Ethernet device. */
+
+typedef int (*flow_ctrl_get_t)(struct rte_eth_dev *dev,
+ struct rte_eth_fc_conf *fc_conf);
+/**< @internal Get current flow control parameter on an Ethernet device */
+
+typedef int (*flow_ctrl_set_t)(struct rte_eth_dev *dev,
+ struct rte_eth_fc_conf *fc_conf);
+/**< @internal Setup flow control parameter on an Ethernet device */
+
+typedef int (*priority_flow_ctrl_set_t)(struct rte_eth_dev *dev,
+ struct rte_eth_pfc_conf *pfc_conf);
+/**< @internal Setup priority flow control parameter on an Ethernet device */
+
+typedef int (*reta_update_t)(struct rte_eth_dev *dev,
+ struct rte_eth_rss_reta_entry64 *reta_conf,
+ uint16_t reta_size);
+/**< @internal Update RSS redirection table on an Ethernet device */
+
+typedef int (*reta_query_t)(struct rte_eth_dev *dev,
+ struct rte_eth_rss_reta_entry64 *reta_conf,
+ uint16_t reta_size);
+/**< @internal Query RSS redirection table on an Ethernet device */
+
+typedef int (*rss_hash_update_t)(struct rte_eth_dev *dev,
+ struct rte_eth_rss_conf *rss_conf);
+/**< @internal Update RSS hash configuration of an Ethernet device */
+
+typedef int (*rss_hash_conf_get_t)(struct rte_eth_dev *dev,
+ struct rte_eth_rss_conf *rss_conf);
+/**< @internal Get current RSS hash configuration of an Ethernet device */
+
+typedef int (*eth_dev_led_on_t)(struct rte_eth_dev *dev);
+/**< @internal Turn on SW controllable LED on an Ethernet device */
+
+typedef int (*eth_dev_led_off_t)(struct rte_eth_dev *dev);
+/**< @internal Turn off SW controllable LED on an Ethernet device */
+
+typedef void (*eth_mac_addr_remove_t)(struct rte_eth_dev *dev, uint32_t index);
+/**< @internal Remove MAC address from receive address register */
+
+typedef int (*eth_mac_addr_add_t)(struct rte_eth_dev *dev,
+ struct ether_addr *mac_addr,
+ uint32_t index,
+ uint32_t vmdq);
+/**< @internal Set a MAC address into the Receive Address Register */
+
+typedef void (*eth_mac_addr_set_t)(struct rte_eth_dev *dev,
+ struct ether_addr *mac_addr);
+/**< @internal Set a MAC address into the Receive Address Register */
+
+typedef int (*eth_uc_hash_table_set_t)(struct rte_eth_dev *dev,
+ struct ether_addr *mac_addr,
+ uint8_t on);
+/**< @internal Set a Unicast Hash bitmap */
+
+typedef int (*eth_uc_all_hash_table_set_t)(struct rte_eth_dev *dev,
+ uint8_t on);
+/**< @internal Set all Unicast Hash bitmap */
+
+typedef int (*eth_set_queue_rate_limit_t)(struct rte_eth_dev *dev,
+ uint16_t queue_idx,
+ uint16_t tx_rate);
+/**< @internal Set queue TX rate */
+
+typedef int (*eth_mirror_rule_set_t)(struct rte_eth_dev *dev,
+ struct rte_eth_mirror_conf *mirror_conf,
+ uint8_t rule_id,
+ uint8_t on);
+/**< @internal Add a traffic mirroring rule on an Ethernet device */
+
+typedef int (*eth_mirror_rule_reset_t)(struct rte_eth_dev *dev,
+ uint8_t rule_id);
+/**< @internal Remove a traffic mirroring rule on an Ethernet device */
+
+typedef int (*eth_udp_tunnel_port_add_t)(struct rte_eth_dev *dev,
+ struct rte_eth_udp_tunnel *tunnel_udp);
+/**< @internal Add tunneling UDP port */
+
+typedef int (*eth_udp_tunnel_port_del_t)(struct rte_eth_dev *dev,
+ struct rte_eth_udp_tunnel *tunnel_udp);
+/**< @internal Delete tunneling UDP port */
+
+typedef int (*eth_set_mc_addr_list_t)(struct rte_eth_dev *dev,
+ struct ether_addr *mc_addr_set,
+ uint32_t nb_mc_addr);
+/**< @internal set the list of multicast addresses on an Ethernet device */
+
+typedef int (*eth_timesync_enable_t)(struct rte_eth_dev *dev);
+/**< @internal Function used to enable IEEE1588/802.1AS timestamping. */
+
+typedef int (*eth_timesync_disable_t)(struct rte_eth_dev *dev);
+/**< @internal Function used to disable IEEE1588/802.1AS timestamping. */
+
+typedef int (*eth_timesync_read_rx_timestamp_t)(struct rte_eth_dev *dev,
+ struct timespec *timestamp,
+ uint32_t flags);
+/**< @internal Function used to read an RX IEEE1588/802.1AS timestamp. */
+
+typedef int (*eth_timesync_read_tx_timestamp_t)(struct rte_eth_dev *dev,
+ struct timespec *timestamp);
+/**< @internal Function used to read a TX IEEE1588/802.1AS timestamp. */
+
+typedef int (*eth_timesync_adjust_time)(struct rte_eth_dev *dev, int64_t);
+/**< @internal Function used to adjust the device clock */
+
+typedef int (*eth_timesync_read_time)(struct rte_eth_dev *dev,
+ struct timespec *timestamp);
+/**< @internal Function used to get time from the device clock. */
+
+typedef int (*eth_timesync_write_time)(struct rte_eth_dev *dev,
+ const struct timespec *timestamp);
+/**< @internal Function used to set the time of the device clock */
+
+typedef int (*eth_get_reg_t)(struct rte_eth_dev *dev,
+ struct rte_dev_reg_info *info);
+/**< @internal Retrieve registers */
+
+typedef int (*eth_get_eeprom_length_t)(struct rte_eth_dev *dev);
+/**< @internal Retrieve eeprom size */
+
+typedef int (*eth_get_eeprom_t)(struct rte_eth_dev *dev,
+ struct rte_dev_eeprom_info *info);
+/**< @internal Retrieve eeprom data */
+
+typedef int (*eth_set_eeprom_t)(struct rte_eth_dev *dev,
+ struct rte_dev_eeprom_info *info);
+/**< @internal Program eeprom data */
+
+typedef int (*eth_l2_tunnel_eth_type_conf_t)
+ (struct rte_eth_dev *dev, struct rte_eth_l2_tunnel_conf *l2_tunnel);
+/**< @internal config l2 tunnel ether type */
+
+typedef int (*eth_l2_tunnel_offload_set_t)
+ (struct rte_eth_dev *dev,
+ struct rte_eth_l2_tunnel_conf *l2_tunnel,
+ uint32_t mask,
+ uint8_t en);
+/**< @internal enable/disable the l2 tunnel offload functions */
+
+
+typedef int (*eth_filter_ctrl_t)(struct rte_eth_dev *dev,
+ enum rte_filter_type filter_type,
+ enum rte_filter_op filter_op,
+ void *arg);
+/**< @internal Apply an operation of a given filter type on an Ethernet device */
+
+typedef int (*eth_tm_ops_get_t)(struct rte_eth_dev *dev, void *ops);
+/**< @internal Get Traffic Management (TM) operations on an Ethernet device */
+
+typedef int (*eth_mtr_ops_get_t)(struct rte_eth_dev *dev, void *ops);
+/**< @internal Get Traffic Metering and Policing (MTR) operations */
+
+typedef int (*eth_get_dcb_info)(struct rte_eth_dev *dev,
+ struct rte_eth_dcb_info *dcb_info);
+/**< @internal Get dcb information on an Ethernet device */
+
+typedef int (*eth_pool_ops_supported_t)(struct rte_eth_dev *dev,
+ const char *pool);
+/**< @internal Test if a port supports specific mempool ops */
+
+/**
+ * @internal A structure containing the functions exported by an Ethernet driver.
+ */
+struct eth_dev_ops {
+ eth_dev_configure_t dev_configure; /**< Configure device. */
+ eth_dev_start_t dev_start; /**< Start device. */
+ eth_dev_stop_t dev_stop; /**< Stop device. */
+ eth_dev_set_link_up_t dev_set_link_up; /**< Device link up. */
+ eth_dev_set_link_down_t dev_set_link_down; /**< Device link down. */
+ eth_dev_close_t dev_close; /**< Close device. */
+ eth_dev_reset_t dev_reset; /**< Reset device. */
+ eth_link_update_t link_update; /**< Get device link state. */
+ eth_is_removed_t is_removed;
+ /**< Check if the device was physically removed. */
+
+ eth_promiscuous_enable_t promiscuous_enable; /**< Promiscuous ON. */
+ eth_promiscuous_disable_t promiscuous_disable;/**< Promiscuous OFF. */
+ eth_allmulticast_enable_t allmulticast_enable;/**< RX multicast ON. */
+ eth_allmulticast_disable_t allmulticast_disable;/**< RX multicast OFF. */
+ eth_mac_addr_remove_t mac_addr_remove; /**< Remove MAC address. */
+ eth_mac_addr_add_t mac_addr_add; /**< Add a MAC address. */
+ eth_mac_addr_set_t mac_addr_set; /**< Set a MAC address. */
+ eth_set_mc_addr_list_t set_mc_addr_list; /**< set list of mcast addrs. */
+ mtu_set_t mtu_set; /**< Set MTU. */
+
+ eth_stats_get_t stats_get; /**< Get generic device statistics. */
+ eth_stats_reset_t stats_reset; /**< Reset generic device statistics. */
+ eth_xstats_get_t xstats_get; /**< Get extended device statistics. */
+ eth_xstats_reset_t xstats_reset; /**< Reset extended device statistics. */
+ eth_xstats_get_names_t xstats_get_names;
+ /**< Get names of extended statistics. */
+ eth_queue_stats_mapping_set_t queue_stats_mapping_set;
+ /**< Configure per queue stat counter mapping. */
+
+ eth_dev_infos_get_t dev_infos_get; /**< Get device info. */
+ eth_rxq_info_get_t rxq_info_get; /**< retrieve RX queue information. */
+ eth_txq_info_get_t txq_info_get; /**< retrieve TX queue information. */
+ eth_fw_version_get_t fw_version_get; /**< Get firmware version. */
+ eth_dev_supported_ptypes_get_t dev_supported_ptypes_get;
+ /**< Get packet types supported and identified by device. */
+
+ vlan_filter_set_t vlan_filter_set; /**< Filter VLAN Setup. */
+ vlan_tpid_set_t vlan_tpid_set; /**< Outer/Inner VLAN TPID Setup. */
+ vlan_strip_queue_set_t vlan_strip_queue_set; /**< VLAN Stripping on queue. */
+ vlan_offload_set_t vlan_offload_set; /**< Set VLAN Offload. */
+ vlan_pvid_set_t vlan_pvid_set; /**< Set port based TX VLAN insertion. */
+
+ eth_queue_start_t rx_queue_start;/**< Start RX for a queue. */
+ eth_queue_stop_t rx_queue_stop; /**< Stop RX for a queue. */
+ eth_queue_start_t tx_queue_start;/**< Start TX for a queue. */
+ eth_queue_stop_t tx_queue_stop; /**< Stop TX for a queue. */
+ eth_rx_queue_setup_t rx_queue_setup;/**< Set up device RX queue. */
+ eth_queue_release_t rx_queue_release; /**< Release RX queue. */
+ eth_rx_queue_count_t rx_queue_count;
+ /**< Get the number of used RX descriptors. */
+ eth_rx_descriptor_done_t rx_descriptor_done; /**< Check rxd DD bit. */
+ eth_rx_descriptor_status_t rx_descriptor_status;
+ /**< Check the status of a Rx descriptor. */
+ eth_tx_descriptor_status_t tx_descriptor_status;
+ /**< Check the status of a Tx descriptor. */
+ eth_rx_enable_intr_t rx_queue_intr_enable; /**< Enable Rx queue interrupt. */
+ eth_rx_disable_intr_t rx_queue_intr_disable; /**< Disable Rx queue interrupt. */
+ eth_tx_queue_setup_t tx_queue_setup;/**< Set up device TX queue. */
+ eth_queue_release_t tx_queue_release; /**< Release TX queue. */
+ eth_tx_done_cleanup_t tx_done_cleanup;/**< Free tx ring mbufs */
+
+ eth_dev_led_on_t dev_led_on; /**< Turn on LED. */
+ eth_dev_led_off_t dev_led_off; /**< Turn off LED. */
+
+ flow_ctrl_get_t flow_ctrl_get; /**< Get flow control. */
+ flow_ctrl_set_t flow_ctrl_set; /**< Setup flow control. */
+ priority_flow_ctrl_set_t priority_flow_ctrl_set; /**< Setup priority flow control. */
+
+ eth_uc_hash_table_set_t uc_hash_table_set; /**< Set Unicast Table Array. */
+ eth_uc_all_hash_table_set_t uc_all_hash_table_set; /**< Set Unicast hash bitmap. */
+
+ eth_mirror_rule_set_t mirror_rule_set; /**< Add a traffic mirror rule. */
+ eth_mirror_rule_reset_t mirror_rule_reset; /**< reset a traffic mirror rule. */
+
+ eth_udp_tunnel_port_add_t udp_tunnel_port_add; /** Add UDP tunnel port. */
+ eth_udp_tunnel_port_del_t udp_tunnel_port_del; /** Del UDP tunnel port. */
+ eth_l2_tunnel_eth_type_conf_t l2_tunnel_eth_type_conf;
+ /** Config ether type of l2 tunnel. */
+ eth_l2_tunnel_offload_set_t l2_tunnel_offload_set;
+ /** Enable/disable l2 tunnel offload functions. */
+
+ eth_set_queue_rate_limit_t set_queue_rate_limit; /**< Set queue rate limit. */
+
+ rss_hash_update_t rss_hash_update; /** Configure RSS hash protocols. */
+ rss_hash_conf_get_t rss_hash_conf_get; /** Get current RSS hash configuration. */
+ reta_update_t reta_update; /** Update redirection table. */
+ reta_query_t reta_query; /** Query redirection table. */
+
+ eth_get_reg_t get_reg; /**< Get registers. */
+ eth_get_eeprom_length_t get_eeprom_length; /**< Get eeprom length. */
+ eth_get_eeprom_t get_eeprom; /**< Get eeprom data. */
+ eth_set_eeprom_t set_eeprom; /**< Set eeprom. */
+
+
+ eth_filter_ctrl_t filter_ctrl; /**< common filter control. */
+
+ eth_get_dcb_info get_dcb_info; /** Get DCB information. */
+
+ eth_timesync_enable_t timesync_enable;
+ /** Turn IEEE1588/802.1AS timestamping on. */
+ eth_timesync_disable_t timesync_disable;
+ /** Turn IEEE1588/802.1AS timestamping off. */
+ eth_timesync_read_rx_timestamp_t timesync_read_rx_timestamp;
+ /** Read the IEEE1588/802.1AS RX timestamp. */
+ eth_timesync_read_tx_timestamp_t timesync_read_tx_timestamp;
+ /** Read the IEEE1588/802.1AS TX timestamp. */
+ eth_timesync_adjust_time timesync_adjust_time; /** Adjust the device clock. */
+ eth_timesync_read_time timesync_read_time; /** Get the device clock time. */
+ eth_timesync_write_time timesync_write_time; /** Set the device clock time. */
+
+ eth_xstats_get_by_id_t xstats_get_by_id;
+ /**< Get extended device statistic values by ID. */
+ eth_xstats_get_names_by_id_t xstats_get_names_by_id;
+ /**< Get name of extended device statistics by ID. */
+
+ eth_tm_ops_get_t tm_ops_get;
+ /**< Get Traffic Management (TM) operations. */
+
+ eth_mtr_ops_get_t mtr_ops_get;
+ /**< Get Traffic Metering and Policing (MTR) operations. */
+
+ eth_pool_ops_supported_t pool_ops_supported;
+ /**< Test if a port supports specific mempool ops */
+};
+
+/**
+ * @internal
+ * Structure used to hold information about the callbacks to be called for a
+ * queue on RX and TX.
+ */
+struct rte_eth_rxtx_callback {
+ struct rte_eth_rxtx_callback *next;
+ union{
+ rte_rx_callback_fn rx;
+ rte_tx_callback_fn tx;
+ } fn;
+ void *param;
+};
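
For context (not part of the patch), applications never fill this structure directly; entries are created through the public callback API, e.g. rte_eth_add_tx_callback(). A sketch with an illustrative counter callback:

#include <rte_ethdev.h>

/* Counts packets about to be handed to the driver on a Tx queue. */
static uint16_t
example_count_tx_cb(uint16_t port_id, uint16_t queue_id,
		    struct rte_mbuf *pkts[], uint16_t nb_pkts, void *user_param)
{
	uint64_t *counter = user_param;

	(void)port_id;
	(void)queue_id;
	(void)pkts;
	*counter += nb_pkts;
	return nb_pkts; /* number of packets passed on to the driver */
}

/* Registration (illustrative):
 * static uint64_t tx_count;
 * rte_eth_add_tx_callback(port_id, queue_id, example_count_tx_cb, &tx_count);
 */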
+
+/**
+ * @internal
+ * The generic data structure associated with each ethernet device.
+ *
+ * Pointers to burst-oriented packet receive and transmit functions are
+ * located at the beginning of the structure, along with the pointer to
+ * where all the data elements for the particular device are stored in shared
+ * memory. This split allows the function pointer and driver data to be per-
+ * process, while the actual configuration data for the device is shared.
+ */
+struct rte_eth_dev {
+ eth_rx_burst_t rx_pkt_burst; /**< Pointer to PMD receive function. */
+ eth_tx_burst_t tx_pkt_burst; /**< Pointer to PMD transmit function. */
+ eth_tx_prep_t tx_pkt_prepare; /**< Pointer to PMD transmit prepare function. */
+ struct rte_eth_dev_data *data; /**< Pointer to device data */
+ const struct eth_dev_ops *dev_ops; /**< Functions exported by PMD */
+ struct rte_device *device; /**< Backing device */
+ struct rte_intr_handle *intr_handle; /**< Device interrupt handle */
+ /** User application callbacks for NIC interrupts */
+ struct rte_eth_dev_cb_list link_intr_cbs;
+ /**
+ * User-supplied functions called from rx_burst to post-process
+ * received packets before passing them to the user
+ */
+ struct rte_eth_rxtx_callback *post_rx_burst_cbs[RTE_MAX_QUEUES_PER_PORT];
+ /**
+ * User-supplied functions called from tx_burst to pre-process
+	 * outgoing packets before passing them to the driver for transmission.
+ */
+ struct rte_eth_rxtx_callback *pre_tx_burst_cbs[RTE_MAX_QUEUES_PER_PORT];
+ enum rte_eth_dev_state state; /**< Flag indicating the port state */
+ void *security_ctx; /**< Context for security ops */
+} __rte_cache_aligned;
+
+struct rte_eth_dev_sriov;
+struct rte_eth_dev_owner;
+
+/**
+ * @internal
+ * The data part, with no function pointers, associated with each ethernet device.
+ *
+ * This structure is safe to place in shared memory to be common among different
+ * processes in a multi-process configuration.
+ */
+struct rte_eth_dev_data {
+ char name[RTE_ETH_NAME_MAX_LEN]; /**< Unique identifier name */
+
+ void **rx_queues; /**< Array of pointers to RX queues. */
+ void **tx_queues; /**< Array of pointers to TX queues. */
+ uint16_t nb_rx_queues; /**< Number of RX queues. */
+ uint16_t nb_tx_queues; /**< Number of TX queues. */
+
+ struct rte_eth_dev_sriov sriov; /**< SRIOV data */
+
+ void *dev_private; /**< PMD-specific private data */
+
+ struct rte_eth_link dev_link;
+ /**< Link-level information & status */
+
+ struct rte_eth_conf dev_conf; /**< Configuration applied to device. */
+ uint16_t mtu; /**< Maximum Transmission Unit. */
+
+ uint32_t min_rx_buf_size;
+ /**< Common rx buffer size handled by all queues */
+
+ uint64_t rx_mbuf_alloc_failed; /**< RX ring mbuf allocation failures. */
+ struct ether_addr* mac_addrs;/**< Device Ethernet Link address. */
+ uint64_t mac_pool_sel[ETH_NUM_RECEIVE_MAC_ADDR];
+ /** bitmap array of associating Ethernet MAC addresses to pools */
+ struct ether_addr* hash_mac_addrs;
+ /** Device Ethernet MAC addresses of hash filtering. */
+ uint16_t port_id; /**< Device [external] port identifier. */
+ __extension__
+ uint8_t promiscuous : 1, /**< RX promiscuous mode ON(1) / OFF(0). */
+ scattered_rx : 1, /**< RX of scattered packets is ON(1) / OFF(0) */
+ all_multicast : 1, /**< RX all multicast mode ON(1) / OFF(0). */
+ dev_started : 1, /**< Device state: STARTED(1) / STOPPED(0). */
+ lro : 1; /**< RX LRO is ON(1) / OFF(0) */
+ uint8_t rx_queue_state[RTE_MAX_QUEUES_PER_PORT];
+ /** Queues state: STARTED(1) / STOPPED(0) */
+ uint8_t tx_queue_state[RTE_MAX_QUEUES_PER_PORT];
+ /** Queues state: STARTED(1) / STOPPED(0) */
+ uint32_t dev_flags; /**< Capabilities */
+ enum rte_kernel_driver kdrv; /**< Kernel driver passthrough */
+ int numa_node; /**< NUMA node connection */
+ struct rte_vlan_filter_conf vlan_filter_conf;
+ /**< VLAN filter configuration. */
+ struct rte_eth_dev_owner owner; /**< The port owner. */
+} __rte_cache_aligned;
+
+/**
+ * @internal
+ * The pool of *rte_eth_dev* structures. The size of the pool
+ * is configured at compile-time in the <rte_ethdev.c> file.
+ */
+extern struct rte_eth_dev rte_eth_devices[];
+
+#endif /* _RTE_ETHDEV_CORE_H_ */
diff --git a/lib/librte_ether/rte_ethdev_driver.h b/lib/librte_ether/rte_ethdev_driver.h
new file mode 100644
index 00000000..45f08c65
--- /dev/null
+++ b/lib/librte_ether/rte_ethdev_driver.h
@@ -0,0 +1,132 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Intel Corporation
+ */
+
+#ifndef _RTE_ETHDEV_DRIVER_H_
+#define _RTE_ETHDEV_DRIVER_H_
+
+/**
+ * @file
+ *
+ * RTE Ethernet Device PMD API
+ *
+ * These APIs are for use by Ethernet drivers; user applications should not
+ * use them.
+ *
+ */
+
+#include <rte_ethdev.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * @internal
+ * Returns an ethdev slot specified by the unique identifier name.
+ *
+ * @param name
+ * The pointer to the Unique identifier name for each Ethernet device
+ * @return
+ * - The pointer to the ethdev slot, on success. NULL on error
+ */
+struct rte_eth_dev *rte_eth_dev_allocated(const char *name);
+
+/**
+ * @internal
+ * Allocates a new ethdev slot for an ethernet device and returns the pointer
+ * to that slot for the driver to use.
+ *
+ * @param name Unique identifier name for each Ethernet device
+ * @return
+ *   - Slot in the rte_eth_devices array for a new device;
+ */
+struct rte_eth_dev *rte_eth_dev_allocate(const char *name);
+
+/**
+ * @internal
+ * Attach to the ethdev already initialized by the primary
+ * process.
+ *
+ * @param name Ethernet device's name.
+ * @return
+ *   - Success: Slot in the rte_eth_devices array for attached
+ * device.
+ * - Error: Null pointer.
+ */
+struct rte_eth_dev *rte_eth_dev_attach_secondary(const char *name);
+
+/**
+ * @internal
+ * Release the specified ethdev port.
+ *
+ * @param eth_dev
+ * The *eth_dev* pointer is the address of the *rte_eth_dev* structure.
+ * @return
+ * - 0 on success, negative on error
+ */
+int rte_eth_dev_release_port(struct rte_eth_dev *eth_dev);
+
+/**
+ * @internal
+ * Release the device's queues and clear its configuration to force the user
+ * application to reconfigure it. It is for internal use only.
+ *
+ * @param dev
+ * Pointer to struct rte_eth_dev.
+ *
+ * @return
+ * void
+ */
+void _rte_eth_dev_reset(struct rte_eth_dev *dev);
+
+/**
+ * @internal Executes all the user-application-registered callbacks for
+ * the specified device. It is for DPDK internal use only; user
+ * applications should not call it directly.
+ *
+ * @param dev
+ * Pointer to struct rte_eth_dev.
+ * @param event
+ * Eth device interrupt event type.
+ * @param ret_param
+ * To pass data back to user application.
+ * This allows the user application to decide if a particular function
+ * is permitted or not.
+ *
+ * @return
+ * int
+ */
+int _rte_eth_dev_callback_process(struct rte_eth_dev *dev,
+ enum rte_eth_event_type event, void *ret_param);
+
+/**
+ * Create memzone for HW rings.
+ * malloc can't be used as the physical address is needed.
+ * If the memzone is already created, then this function returns a ptr
+ * to the old one.
+ *
+ * @param eth_dev
+ * The *eth_dev* pointer is the address of the *rte_eth_dev* structure
+ * @param name
+ * The name of the memory zone
+ * @param queue_id
+ * The index of the queue to add to name
+ * @param size
+ * The size of the memory area
+ * @param align
+ * Alignment for resulting memzone. Must be a power of 2.
+ * @param socket_id
+ * The *socket_id* argument is the socket identifier in case of NUMA.
+ */
+const struct rte_memzone *
+rte_eth_dma_zone_reserve(const struct rte_eth_dev *eth_dev, const char *name,
+ uint16_t queue_id, size_t size,
+ unsigned align, int socket_id);
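
A hedged sketch (not part of the patch) of how a PMD's tx_queue_setup callback might call this helper; the descriptor size and function name are hypothetical:

#include <errno.h>
#include <rte_ethdev_driver.h>

static int
example_reserve_tx_ring(struct rte_eth_dev *dev, uint16_t queue_id,
			uint16_t nb_tx_desc, unsigned int socket_id)
{
	const struct rte_memzone *mz;

	/* 16 bytes per descriptor is only an illustrative size. */
	mz = rte_eth_dma_zone_reserve(dev, "tx_ring", queue_id,
				      (size_t)nb_tx_desc * 16,
				      RTE_CACHE_LINE_SIZE, socket_id);
	if (mz == NULL)
		return -ENOMEM;
	/* mz->addr and mz->iova hold the ring's virtual and IO addresses. */
	return 0;
}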
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_ETHDEV_DRIVER_H_ */
diff --git a/lib/librte_ether/rte_ethdev_pci.h b/lib/librte_ether/rte_ethdev_pci.h
index 722075e0..897ce5b4 100644
--- a/lib/librte_ether/rte_ethdev_pci.h
+++ b/lib/librte_ether/rte_ethdev_pci.h
@@ -37,7 +37,8 @@
#include <rte_malloc.h>
#include <rte_pci.h>
#include <rte_bus_pci.h>
-#include <rte_ethdev.h>
+#include <rte_config.h>
+#include <rte_ethdev_driver.h>
/**
* Copy pci device info to the Ethernet device data.
diff --git a/lib/librte_ether/rte_ethdev_vdev.h b/lib/librte_ether/rte_ethdev_vdev.h
index ff92e6ed..259feda3 100644
--- a/lib/librte_ether/rte_ethdev_vdev.h
+++ b/lib/librte_ether/rte_ethdev_vdev.h
@@ -34,9 +34,10 @@
#ifndef _RTE_ETHDEV_VDEV_H_
#define _RTE_ETHDEV_VDEV_H_
+#include <rte_config.h>
#include <rte_malloc.h>
#include <rte_bus_vdev.h>
-#include <rte_ethdev.h>
+#include <rte_ethdev_driver.h>
/**
* @internal
diff --git a/lib/librte_ether/rte_ethdev_version.map b/lib/librte_ether/rte_ethdev_version.map
index e9681ac8..87f02fb7 100644
--- a/lib/librte_ether/rte_ethdev_version.map
+++ b/lib/librte_ether/rte_ethdev_version.map
@@ -156,7 +156,6 @@ DPDK_17.08 {
rte_flow_copy;
rte_flow_isolate;
rte_tm_capabilities_get;
- rte_tm_get_leaf_nodes;
rte_tm_hierarchy_commit;
rte_tm_level_capabilities_get;
rte_tm_mark_ip_dscp;
@@ -198,9 +197,25 @@ DPDK_17.11 {
} DPDK_17.08;
+DPDK_18.02 {
+ global:
+
+ rte_eth_dev_filter_ctrl;
+
+} DPDK_17.11;
+
EXPERIMENTAL {
global:
+ rte_eth_dev_is_removed;
+ rte_eth_dev_owner_delete;
+ rte_eth_dev_owner_get;
+ rte_eth_dev_owner_new;
+ rte_eth_dev_owner_set;
+ rte_eth_dev_owner_unset;
+ rte_eth_dev_rx_offload_name;
+ rte_eth_dev_tx_offload_name;
+ rte_eth_find_next_owned_by;
rte_mtr_capabilities_get;
rte_mtr_create;
rte_mtr_destroy;
diff --git a/lib/librte_ether/rte_flow.c b/lib/librte_ether/rte_flow.c
index 66590630..38f2d27b 100644
--- a/lib/librte_ether/rte_flow.c
+++ b/lib/librte_ether/rte_flow.c
@@ -1,34 +1,6 @@
-/*-
- * BSD LICENSE
- *
- * Copyright 2016 6WIND S.A.
- * Copyright 2016 Mellanox.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of 6WIND S.A. nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2016 6WIND S.A.
+ * Copyright 2016 Mellanox.
*/
#include <errno.h>
@@ -81,6 +53,7 @@ static const struct rte_flow_desc_data rte_flow_desc_item[] = {
MK_FLOW_ITEM(GRE, sizeof(struct rte_flow_item_gre)),
MK_FLOW_ITEM(E_TAG, sizeof(struct rte_flow_item_e_tag)),
MK_FLOW_ITEM(NVGRE, sizeof(struct rte_flow_item_nvgre)),
+ MK_FLOW_ITEM(GENEVE, sizeof(struct rte_flow_item_geneve)),
};
/** Generate flow_action[] entry. */
@@ -106,6 +79,18 @@ static const struct rte_flow_desc_data rte_flow_desc_action[] = {
MK_FLOW_ACTION(VF, sizeof(struct rte_flow_action_vf)),
};
+static int
+flow_err(uint16_t port_id, int ret, struct rte_flow_error *error)
+{
+ if (ret == 0)
+ return 0;
+ if (rte_eth_dev_is_removed(port_id))
+ return rte_flow_error_set(error, EIO,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL, rte_strerror(EIO));
+ return ret;
+}
+
/* Get generic flow operations structure from a port. */
const struct rte_flow_ops *
rte_flow_ops_get(uint16_t port_id, struct rte_flow_error *error)
@@ -144,7 +129,8 @@ rte_flow_validate(uint16_t port_id,
if (unlikely(!ops))
return -rte_errno;
if (likely(!!ops->validate))
- return ops->validate(dev, attr, pattern, actions, error);
+ return flow_err(port_id, ops->validate(dev, attr, pattern,
+ actions, error), error);
return rte_flow_error_set(error, ENOSYS,
RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
NULL, rte_strerror(ENOSYS));
@@ -159,12 +145,17 @@ rte_flow_create(uint16_t port_id,
struct rte_flow_error *error)
{
struct rte_eth_dev *dev = &rte_eth_devices[port_id];
+ struct rte_flow *flow;
const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
if (unlikely(!ops))
return NULL;
- if (likely(!!ops->create))
- return ops->create(dev, attr, pattern, actions, error);
+ if (likely(!!ops->create)) {
+ flow = ops->create(dev, attr, pattern, actions, error);
+ if (flow == NULL)
+ flow_err(port_id, -rte_errno, error);
+ return flow;
+ }
rte_flow_error_set(error, ENOSYS, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
NULL, rte_strerror(ENOSYS));
return NULL;
@@ -182,7 +173,8 @@ rte_flow_destroy(uint16_t port_id,
if (unlikely(!ops))
return -rte_errno;
if (likely(!!ops->destroy))
- return ops->destroy(dev, flow, error);
+ return flow_err(port_id, ops->destroy(dev, flow, error),
+ error);
return rte_flow_error_set(error, ENOSYS,
RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
NULL, rte_strerror(ENOSYS));
@@ -199,7 +191,7 @@ rte_flow_flush(uint16_t port_id,
if (unlikely(!ops))
return -rte_errno;
if (likely(!!ops->flush))
- return ops->flush(dev, error);
+ return flow_err(port_id, ops->flush(dev, error), error);
return rte_flow_error_set(error, ENOSYS,
RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
NULL, rte_strerror(ENOSYS));
@@ -219,7 +211,8 @@ rte_flow_query(uint16_t port_id,
if (!ops)
return -rte_errno;
if (likely(!!ops->query))
- return ops->query(dev, flow, action, data, error);
+ return flow_err(port_id, ops->query(dev, flow, action, data,
+ error), error);
return rte_flow_error_set(error, ENOSYS,
RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
NULL, rte_strerror(ENOSYS));
@@ -237,7 +230,7 @@ rte_flow_isolate(uint16_t port_id,
if (!ops)
return -rte_errno;
if (likely(!!ops->isolate))
- return ops->isolate(dev, set, error);
+ return flow_err(port_id, ops->isolate(dev, set, error), error);
return rte_flow_error_set(error, ENOSYS,
RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
NULL, rte_strerror(ENOSYS));
diff --git a/lib/librte_ether/rte_flow.h b/lib/librte_ether/rte_flow.h
index 47c88ea5..13e42021 100644
--- a/lib/librte_ether/rte_flow.h
+++ b/lib/librte_ether/rte_flow.h
@@ -1,34 +1,6 @@
-/*-
- * BSD LICENSE
- *
- * Copyright 2016 6WIND S.A.
- * Copyright 2016 Mellanox.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of 6WIND S.A. nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2016 6WIND S.A.
+ * Copyright 2016 Mellanox.
*/
#ifndef RTE_FLOW_H_
@@ -344,6 +316,13 @@ enum rte_flow_item_type {
* See struct rte_flow_item_esp.
*/
RTE_FLOW_ITEM_TYPE_ESP,
+
+ /**
+ * Matches a GENEVE header.
+ *
+ * See struct rte_flow_item_geneve.
+ */
+ RTE_FLOW_ITEM_TYPE_GENEVE,
};
/**
@@ -813,6 +792,29 @@ static const struct rte_flow_item_esp rte_flow_item_esp_mask = {
#endif
/**
+ * RTE_FLOW_ITEM_TYPE_GENEVE.
+ *
+ * Matches a GENEVE header.
+ */
+struct rte_flow_item_geneve {
+ /**
+ * Version (2b), length of the options fields (6b), OAM packet (1b),
+ * critical options present (1b), reserved 0 (6b).
+ */
+ rte_be16_t ver_opt_len_o_c_rsvd0;
+ rte_be16_t protocol; /**< Protocol type. */
+ uint8_t vni[3]; /**< Virtual Network Identifier. */
+ uint8_t rsvd1; /**< Reserved, normally 0x00. */
+};
+
+/** Default mask for RTE_FLOW_ITEM_TYPE_GENEVE. */
+#ifndef __cplusplus
+static const struct rte_flow_item_geneve rte_flow_item_geneve_mask = {
+ .vni = "\xff\xff\xff",
+};
+#endif
+
+/**
* Matching pattern item definition.
*
* A pattern is formed by stacking items starting from the lowest protocol
@@ -1237,6 +1239,8 @@ struct rte_flow_error {
*
* -ENOSYS: underlying device does not support this functionality.
*
+ * -EIO: underlying device is removed.
+ *
* -EINVAL: unknown or invalid rule specification.
*
* -ENOTSUP: valid but unsupported rule specification (e.g. partial
diff --git a/lib/librte_ether/rte_flow_driver.h b/lib/librte_ether/rte_flow_driver.h
index 254d1cb2..7778c8e0 100644
--- a/lib/librte_ether/rte_flow_driver.h
+++ b/lib/librte_ether/rte_flow_driver.h
@@ -1,34 +1,6 @@
-/*-
- * BSD LICENSE
- *
- * Copyright 2016 6WIND S.A.
- * Copyright 2016 Mellanox.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of 6WIND S.A. nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2016 6WIND S.A.
+ * Copyright 2016 Mellanox.
*/
#ifndef RTE_FLOW_DRIVER_H_
diff --git a/lib/librte_ether/rte_mtr.c b/lib/librte_ether/rte_mtr.c
index 4f56f871..1046cb5f 100644
--- a/lib/librte_ether/rte_mtr.c
+++ b/lib/librte_ether/rte_mtr.c
@@ -1,39 +1,11 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2017 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Intel Corporation
*/
#include <stdint.h>
#include <rte_errno.h>
+#include "rte_compat.h"
#include "rte_ethdev.h"
#include "rte_mtr_driver.h"
#include "rte_mtr.h"
@@ -86,7 +58,7 @@ rte_mtr_ops_get(uint16_t port_id, struct rte_mtr_error *error)
})
/* MTR capabilities get */
-int
+int __rte_experimental
rte_mtr_capabilities_get(uint16_t port_id,
struct rte_mtr_capabilities *cap,
struct rte_mtr_error *error)
@@ -97,7 +69,7 @@ rte_mtr_capabilities_get(uint16_t port_id,
}
/* MTR meter profile add */
-int
+int __rte_experimental
rte_mtr_meter_profile_add(uint16_t port_id,
uint32_t meter_profile_id,
struct rte_mtr_meter_profile *profile,
@@ -109,7 +81,7 @@ rte_mtr_meter_profile_add(uint16_t port_id,
}
/** MTR meter profile delete */
-int
+int __rte_experimental
rte_mtr_meter_profile_delete(uint16_t port_id,
uint32_t meter_profile_id,
struct rte_mtr_error *error)
@@ -120,7 +92,7 @@ rte_mtr_meter_profile_delete(uint16_t port_id,
}
/** MTR object create */
-int
+int __rte_experimental
rte_mtr_create(uint16_t port_id,
uint32_t mtr_id,
struct rte_mtr_params *params,
@@ -133,7 +105,7 @@ rte_mtr_create(uint16_t port_id,
}
/** MTR object destroy */
-int
+int __rte_experimental
rte_mtr_destroy(uint16_t port_id,
uint32_t mtr_id,
struct rte_mtr_error *error)
@@ -144,7 +116,7 @@ rte_mtr_destroy(uint16_t port_id,
}
/** MTR object meter enable */
-int
+int __rte_experimental
rte_mtr_meter_enable(uint16_t port_id,
uint32_t mtr_id,
struct rte_mtr_error *error)
@@ -155,7 +127,7 @@ rte_mtr_meter_enable(uint16_t port_id,
}
/** MTR object meter disable */
-int
+int __rte_experimental
rte_mtr_meter_disable(uint16_t port_id,
uint32_t mtr_id,
struct rte_mtr_error *error)
@@ -166,7 +138,7 @@ rte_mtr_meter_disable(uint16_t port_id,
}
/** MTR object meter profile update */
-int
+int __rte_experimental
rte_mtr_meter_profile_update(uint16_t port_id,
uint32_t mtr_id,
uint32_t meter_profile_id,
@@ -178,7 +150,7 @@ rte_mtr_meter_profile_update(uint16_t port_id,
}
/** MTR object meter DSCP table update */
-int
+int __rte_experimental
rte_mtr_meter_dscp_table_update(uint16_t port_id,
uint32_t mtr_id,
enum rte_mtr_color *dscp_table,
@@ -190,7 +162,7 @@ rte_mtr_meter_dscp_table_update(uint16_t port_id,
}
/** MTR object policer action update */
-int
+int __rte_experimental
rte_mtr_policer_actions_update(uint16_t port_id,
uint32_t mtr_id,
uint32_t action_mask,
@@ -203,7 +175,7 @@ rte_mtr_policer_actions_update(uint16_t port_id,
}
/** MTR object enabled stats update */
-int
+int __rte_experimental
rte_mtr_stats_update(uint16_t port_id,
uint32_t mtr_id,
uint64_t stats_mask,
@@ -215,7 +187,7 @@ rte_mtr_stats_update(uint16_t port_id,
}
/** MTR object stats read */
-int
+int __rte_experimental
rte_mtr_stats_read(uint16_t port_id,
uint32_t mtr_id,
struct rte_mtr_stats *stats,
diff --git a/lib/librte_ether/rte_mtr.h b/lib/librte_ether/rte_mtr.h
index f6b6ef3b..c4819b27 100644
--- a/lib/librte_ether/rte_mtr.h
+++ b/lib/librte_ether/rte_mtr.h
@@ -74,7 +74,7 @@
* @b EXPERIMENTAL: this API may change without prior notice
*/
#include <stdint.h>
-
+#include <rte_compat.h>
#include <rte_common.h>
#ifdef __cplusplus
@@ -447,7 +447,7 @@ struct rte_mtr_error {
* @return
* 0 on success, non-zero error code otherwise.
*/
-int
+int __rte_experimental
rte_mtr_capabilities_get(uint16_t port_id,
struct rte_mtr_capabilities *cap,
struct rte_mtr_error *error);
@@ -470,7 +470,7 @@ rte_mtr_capabilities_get(uint16_t port_id,
* @return
* 0 on success, non-zero error code otherwise.
*/
-int
+int __rte_experimental
rte_mtr_meter_profile_add(uint16_t port_id,
uint32_t meter_profile_id,
struct rte_mtr_meter_profile *profile,
@@ -491,7 +491,7 @@ rte_mtr_meter_profile_add(uint16_t port_id,
* @return
* 0 on success, non-zero error code otherwise.
*/
-int
+int __rte_experimental
rte_mtr_meter_profile_delete(uint16_t port_id,
uint32_t meter_profile_id,
struct rte_mtr_error *error);
@@ -519,7 +519,7 @@ rte_mtr_meter_profile_delete(uint16_t port_id,
*
* @see enum rte_flow_action_type::RTE_FLOW_ACTION_TYPE_METER
*/
-int
+int __rte_experimental
rte_mtr_create(uint16_t port_id,
uint32_t mtr_id,
struct rte_mtr_params *params,
@@ -542,7 +542,7 @@ rte_mtr_create(uint16_t port_id,
* @return
* 0 on success, non-zero error code otherwise.
*/
-int
+int __rte_experimental
rte_mtr_destroy(uint16_t port_id,
uint32_t mtr_id,
struct rte_mtr_error *error);
@@ -569,7 +569,7 @@ rte_mtr_destroy(uint16_t port_id,
* @return
* 0 on success, non-zero error code otherwise.
*/
-int
+int __rte_experimental
rte_mtr_meter_disable(uint16_t port_id,
uint32_t mtr_id,
struct rte_mtr_error *error);
@@ -590,7 +590,7 @@ rte_mtr_meter_disable(uint16_t port_id,
* @return
* 0 on success, non-zero error code otherwise.
*/
-int
+int __rte_experimental
rte_mtr_meter_enable(uint16_t port_id,
uint32_t mtr_id,
struct rte_mtr_error *error);
@@ -609,7 +609,7 @@ rte_mtr_meter_enable(uint16_t port_id,
* @return
* 0 on success, non-zero error code otherwise.
*/
-int
+int __rte_experimental
rte_mtr_meter_profile_update(uint16_t port_id,
uint32_t mtr_id,
uint32_t meter_profile_id,
@@ -633,7 +633,7 @@ rte_mtr_meter_profile_update(uint16_t port_id,
* @return
* 0 on success, non-zero error code otherwise.
*/
-int
+int __rte_experimental
rte_mtr_meter_dscp_table_update(uint16_t port_id,
uint32_t mtr_id,
enum rte_mtr_color *dscp_table,
@@ -659,7 +659,7 @@ rte_mtr_meter_dscp_table_update(uint16_t port_id,
* @return
* 0 on success, non-zero error code otherwise.
*/
-int
+int __rte_experimental
rte_mtr_policer_actions_update(uint16_t port_id,
uint32_t mtr_id,
uint32_t action_mask,
@@ -684,7 +684,7 @@ rte_mtr_policer_actions_update(uint16_t port_id,
*
* @see enum rte_mtr_stats_type
*/
-int
+int __rte_experimental
rte_mtr_stats_update(uint16_t port_id,
uint32_t mtr_id,
uint64_t stats_mask,
@@ -715,7 +715,7 @@ rte_mtr_stats_update(uint16_t port_id,
*
* @see enum rte_mtr_stats_type
*/
-int
+int __rte_experimental
rte_mtr_stats_read(uint16_t port_id,
uint32_t mtr_id,
struct rte_mtr_stats *stats,
diff --git a/lib/librte_ether/rte_mtr_driver.h b/lib/librte_ether/rte_mtr_driver.h
index 6a289ef1..c9a6d7c3 100644
--- a/lib/librte_ether/rte_mtr_driver.h
+++ b/lib/librte_ether/rte_mtr_driver.h
@@ -1,34 +1,5 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2017 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Intel Corporation
*/
#ifndef __INCLUDE_RTE_MTR_DRIVER_H__
diff --git a/lib/librte_ether/rte_tm.c b/lib/librte_ether/rte_tm.c
index ceac3411..9709454f 100644
--- a/lib/librte_ether/rte_tm.c
+++ b/lib/librte_ether/rte_tm.c
@@ -1,34 +1,5 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2017 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Intel Corporation
*/
#include <stdint.h>
diff --git a/lib/librte_ether/rte_tm_driver.h b/lib/librte_ether/rte_tm_driver.h
index 2376943d..90114ff5 100644
--- a/lib/librte_ether/rte_tm_driver.h
+++ b/lib/librte_ether/rte_tm_driver.h
@@ -1,34 +1,5 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2017 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Intel Corporation
*/
#ifndef __INCLUDE_RTE_TM_DRIVER_H__