Diffstat (limited to 'lib/librte_ether')
-rw-r--r--  lib/librte_ether/Makefile                  2
-rw-r--r--  lib/librte_ether/rte_eth_ctrl.h            7
-rw-r--r--  lib/librte_ether/rte_ethdev.c            265
-rw-r--r--  lib/librte_ether/rte_ethdev.h            199
-rw-r--r--  lib/librte_ether/rte_ether_version.map    10
5 files changed, 342 insertions(+), 141 deletions(-)
diff --git a/lib/librte_ether/Makefile b/lib/librte_ether/Makefile
index e8102846..0bb5dc90 100644
--- a/lib/librte_ether/Makefile
+++ b/lib/librte_ether/Makefile
@@ -41,7 +41,7 @@ CFLAGS += $(WERROR_FLAGS)
EXPORT_MAP := rte_ether_version.map
-LIBABIVER := 3
+LIBABIVER := 4
SRCS-y += rte_ethdev.c
diff --git a/lib/librte_ether/rte_eth_ctrl.h b/lib/librte_ether/rte_eth_ctrl.h
index b8c7be90..c3a2c9e4 100644
--- a/lib/librte_ether/rte_eth_ctrl.h
+++ b/lib/librte_ether/rte_eth_ctrl.h
@@ -74,7 +74,12 @@ extern "C" {
#define RTE_ETH_FLOW_IPV6_EX 15
#define RTE_ETH_FLOW_IPV6_TCP_EX 16
#define RTE_ETH_FLOW_IPV6_UDP_EX 17
-#define RTE_ETH_FLOW_MAX 18
+#define RTE_ETH_FLOW_PORT 18
+ /**< Consider device port number as a flow differentiator */
+#define RTE_ETH_FLOW_VXLAN 19 /**< VXLAN protocol based flow */
+#define RTE_ETH_FLOW_GENEVE 20 /**< GENEVE protocol based flow */
+#define RTE_ETH_FLOW_NVGRE 21 /**< NVGRE protocol based flow */
+#define RTE_ETH_FLOW_MAX 22
/**
* Feature filter types
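The new flow types slot in ahead of RTE_ETH_FLOW_MAX, so any application table indexed by flow type has to grow with the define. A minimal sketch of such a table (the name strings and the flow_type_name() helper are illustrative, not part of the API):

    static const char * const flow_type_names[RTE_ETH_FLOW_MAX] = {
            [RTE_ETH_FLOW_PORT]   = "port",   /* device port number as differentiator */
            [RTE_ETH_FLOW_VXLAN]  = "vxlan",
            [RTE_ETH_FLOW_GENEVE] = "geneve",
            [RTE_ETH_FLOW_NVGRE]  = "nvgre",
            /* ... entries for the pre-existing flow types ... */
    };

    static const char *
    flow_type_name(uint16_t flow_type)
    {
            if (flow_type >= RTE_ETH_FLOW_MAX ||
                flow_type_names[flow_type] == NULL)
                    return "unknown";
            return flow_type_names[flow_type];
    }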
diff --git a/lib/librte_ether/rte_ethdev.c b/lib/librte_ether/rte_ethdev.c
index a31018e8..eac260f1 100644
--- a/lib/librte_ether/rte_ethdev.c
+++ b/lib/librte_ether/rte_ethdev.c
@@ -77,6 +77,12 @@ static uint8_t nb_ports;
/* spinlock for eth device callbacks */
static rte_spinlock_t rte_eth_dev_cb_lock = RTE_SPINLOCK_INITIALIZER;
+/* spinlock for add/remove rx callbacks */
+static rte_spinlock_t rte_eth_rx_cb_lock = RTE_SPINLOCK_INITIALIZER;
+
+/* spinlock for add/remove tx callbacks */
+static rte_spinlock_t rte_eth_tx_cb_lock = RTE_SPINLOCK_INITIALIZER;
+
/* store statistics names and its offset in stats structure */
struct rte_eth_xstats_name_off {
char name[RTE_ETH_XSTATS_NAME_SIZE];
@@ -369,8 +375,7 @@ rte_eth_dev_is_valid_port(uint8_t port_id)
int
rte_eth_dev_socket_id(uint8_t port_id)
{
- if (!rte_eth_dev_is_valid_port(port_id))
- return -1;
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -1);
return rte_eth_devices[port_id].data->numa_node;
}
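The repeated open-coded port checks below all collapse into RTE_ETH_VALID_PORTID_OR_ERR_RET(). For reference, the macro is essentially the old check plus the debug trace, wrapped so it can return an arbitrary error value (sketch of its definition in rte_ethdev.h):

    #define RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, retval) do { \
            if (!rte_eth_dev_is_valid_port(port_id)) { \
                    RTE_PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id); \
                    return retval; \
            } \
    } while (0)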
@@ -383,8 +388,7 @@ rte_eth_dev_count(void)
static enum rte_eth_dev_type
rte_eth_dev_get_device_type(uint8_t port_id)
{
- if (!rte_eth_dev_is_valid_port(port_id))
- return RTE_ETH_DEV_UNKNOWN;
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, RTE_ETH_DEV_UNKNOWN);
return rte_eth_devices[port_id].dev_type;
}
@@ -402,7 +406,7 @@ rte_eth_dev_get_addr_by_port(uint8_t port_id, struct rte_pci_addr *addr)
return 0;
}
-static int
+int
rte_eth_dev_get_name_by_port(uint8_t port_id, char *name)
{
char *tmp;
@@ -421,7 +425,7 @@ rte_eth_dev_get_name_by_port(uint8_t port_id, char *name)
return 0;
}
-static int
+int
rte_eth_dev_get_port_by_name(const char *name, uint8_t *port_id)
{
int i;
@@ -479,10 +483,7 @@ rte_eth_dev_is_detachable(uint8_t port_id)
{
uint32_t dev_flags;
- if (!rte_eth_dev_is_valid_port(port_id)) {
- RTE_PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
- return -EINVAL;
- }
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
switch (rte_eth_devices[port_id].data->kdrv) {
case RTE_KDRV_IGB_UIO:
@@ -1507,9 +1508,85 @@ rte_eth_stats_reset(uint8_t port_id)
dev->data->rx_mbuf_alloc_failed = 0;
}
+static int
+get_xstats_count(uint8_t port_id)
+{
+ struct rte_eth_dev *dev;
+ int count;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
+ dev = &rte_eth_devices[port_id];
+ if (dev->dev_ops->xstats_get_names != NULL) {
+ count = (*dev->dev_ops->xstats_get_names)(dev, NULL, 0);
+ if (count < 0)
+ return count;
+ } else
+ count = 0;
+ count += RTE_NB_STATS;
+ count += dev->data->nb_rx_queues * RTE_NB_RXQ_STATS;
+ count += dev->data->nb_tx_queues * RTE_NB_TXQ_STATS;
+ return count;
+}
+
+int
+rte_eth_xstats_get_names(uint8_t port_id,
+ struct rte_eth_xstat_name *xstats_names,
+ unsigned size)
+{
+ struct rte_eth_dev *dev;
+ int cnt_used_entries;
+ int cnt_expected_entries;
+ uint32_t idx, id_queue;
+
+ cnt_expected_entries = get_xstats_count(port_id);
+ if (xstats_names == NULL || cnt_expected_entries < 0 ||
+ (int)size < cnt_expected_entries)
+ return cnt_expected_entries;
+
+ /* port_id checked in get_xstats_count() */
+ dev = &rte_eth_devices[port_id];
+ if (dev->dev_ops->xstats_get_names != NULL) {
+ cnt_used_entries = (*dev->dev_ops->xstats_get_names)(
+ dev, xstats_names, size);
+ if (cnt_used_entries < 0)
+ return cnt_used_entries;
+ } else
+ /* Driver itself does not support extended stats, but
+ * still has basic stats.
+ */
+ cnt_used_entries = 0;
+
+ for (idx = 0; idx < RTE_NB_STATS; idx++) {
+ snprintf(xstats_names[cnt_used_entries].name,
+ sizeof(xstats_names[0].name),
+ "%s", rte_stats_strings[idx].name);
+ cnt_used_entries++;
+ }
+ for (id_queue = 0; id_queue < dev->data->nb_rx_queues; id_queue++) {
+ for (idx = 0; idx < RTE_NB_RXQ_STATS; idx++) {
+ snprintf(xstats_names[cnt_used_entries].name,
+ sizeof(xstats_names[0].name),
+ "rx_q%u%s",
+ id_queue, rte_rxq_stats_strings[idx].name);
+ cnt_used_entries++;
+ }
+
+ }
+ for (id_queue = 0; id_queue < dev->data->nb_tx_queues; id_queue++) {
+ for (idx = 0; idx < RTE_NB_TXQ_STATS; idx++) {
+ snprintf(xstats_names[cnt_used_entries].name,
+ sizeof(xstats_names[0].name),
+ "tx_q%u%s",
+ id_queue, rte_txq_stats_strings[idx].name);
+ cnt_used_entries++;
+ }
+ }
+ return cnt_used_entries;
+}
+
/* retrieve ethdev extended statistics */
int
-rte_eth_xstats_get(uint8_t port_id, struct rte_eth_xstats *xstats,
+rte_eth_xstats_get(uint8_t port_id, struct rte_eth_xstat *xstats,
unsigned n)
{
struct rte_eth_stats eth_stats;
@@ -1551,8 +1628,7 @@ rte_eth_xstats_get(uint8_t port_id, struct rte_eth_xstats *xstats,
stats_ptr = RTE_PTR_ADD(&eth_stats,
rte_stats_strings[i].offset);
val = *stats_ptr;
- snprintf(xstats[count].name, sizeof(xstats[count].name),
- "%s", rte_stats_strings[i].name);
+ xstats[count].id = count + xcount;
xstats[count++].value = val;
}
@@ -1563,9 +1639,7 @@ rte_eth_xstats_get(uint8_t port_id, struct rte_eth_xstats *xstats,
rte_rxq_stats_strings[i].offset +
q * sizeof(uint64_t));
val = *stats_ptr;
- snprintf(xstats[count].name, sizeof(xstats[count].name),
- "rx_q%u_%s", q,
- rte_rxq_stats_strings[i].name);
+ xstats[count].id = count + xcount;
xstats[count++].value = val;
}
}
@@ -1577,9 +1651,7 @@ rte_eth_xstats_get(uint8_t port_id, struct rte_eth_xstats *xstats,
rte_txq_stats_strings[i].offset +
q * sizeof(uint64_t));
val = *stats_ptr;
- snprintf(xstats[count].name, sizeof(xstats[count].name),
- "tx_q%u_%s", q,
- rte_txq_stats_strings[i].name);
+ xstats[count].id = count + xcount;
xstats[count++].value = val;
}
}
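With names split out of struct rte_eth_xstat, a consumer first fetches the name table and then matches each value to its name through the new id field. A sketch of that two-step pattern (port_id is assumed to exist; allocation and error handling are abbreviated):

    int len = rte_eth_xstats_get_names(port_id, NULL, 0);
    if (len < 0)
            return len;

    struct rte_eth_xstat_name *names = calloc(len, sizeof(*names));
    struct rte_eth_xstat *values = calloc(len, sizeof(*values));

    rte_eth_xstats_get_names(port_id, names, len);
    int n = rte_eth_xstats_get(port_id, values, len);

    for (int i = 0; i < n; i++)
            printf("%s: %" PRIu64 "\n",
                   names[values[i].id].name, values[i].value);

    free(names);
    free(values);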
@@ -1639,7 +1711,6 @@ rte_eth_dev_set_rx_queue_stats_mapping(uint8_t port_id, uint16_t rx_queue_id,
STAT_QMAP_RX);
}
-
void
rte_eth_dev_info_get(uint8_t port_id, struct rte_eth_dev_info *dev_info)
{
@@ -1661,6 +1732,8 @@ rte_eth_dev_info_get(uint8_t port_id, struct rte_eth_dev_info *dev_info)
(*dev->dev_ops->dev_infos_get)(dev, dev_info);
dev_info->pci_dev = dev->pci_dev;
dev_info->driver_name = dev->data->drv_name;
+ dev_info->nb_rx_queues = dev->data->nb_rx_queues;
+ dev_info->nb_tx_queues = dev->data->nb_tx_queues;
}
int
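The two new fields in rte_eth_dev_info expose the configured queue counts, which previously had to be tracked by the application itself. A sketch of using them to walk the RX queues (port_id is assumed; rte_eth_rx_queue_info_get() is the existing per-queue query API):

    struct rte_eth_dev_info dev_info;
    struct rte_eth_rxq_info qinfo;
    uint16_t q;

    rte_eth_dev_info_get(port_id, &dev_info);
    for (q = 0; q < dev_info.nb_rx_queues; q++) {
            if (rte_eth_rx_queue_info_get(port_id, q, &qinfo) == 0)
                    printf("rxq %u: %u descriptors\n", q, qinfo.nb_desc);
    }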
@@ -1994,10 +2067,7 @@ rte_eth_dev_rss_reta_query(uint8_t port_id,
struct rte_eth_dev *dev;
int ret;
- if (port_id >= nb_ports) {
- RTE_PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
- return -ENODEV;
- }
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
/* Check mask bits */
ret = rte_eth_check_reta_mask(reta_conf, reta_size);
@@ -2641,10 +2711,7 @@ rte_eth_dev_rx_intr_ctl(uint8_t port_id, int epfd, int op, void *data)
uint16_t qid;
int rc;
- if (!rte_eth_dev_is_valid_port(port_id)) {
- RTE_PMD_DEBUG_TRACE("Invalid port_id=%u\n", port_id);
- return -ENODEV;
- }
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
dev = &rte_eth_devices[port_id];
intr_handle = &dev->pci_dev->intr_handle;
@@ -2699,10 +2766,7 @@ rte_eth_dev_rx_intr_ctl_q(uint8_t port_id, uint16_t queue_id,
struct rte_intr_handle *intr_handle;
int rc;
- if (!rte_eth_dev_is_valid_port(port_id)) {
- RTE_PMD_DEBUG_TRACE("Invalid port_id=%u\n", port_id);
- return -ENODEV;
- }
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
dev = &rte_eth_devices[port_id];
if (queue_id >= dev->data->nb_rx_queues) {
@@ -2734,10 +2798,7 @@ rte_eth_dev_rx_intr_enable(uint8_t port_id,
{
struct rte_eth_dev *dev;
- if (!rte_eth_dev_is_valid_port(port_id)) {
- RTE_PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
- return -ENODEV;
- }
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
dev = &rte_eth_devices[port_id];
@@ -2751,10 +2812,7 @@ rte_eth_dev_rx_intr_disable(uint8_t port_id,
{
struct rte_eth_dev *dev;
- if (!rte_eth_dev_is_valid_port(port_id)) {
- RTE_PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
- return -ENODEV;
- }
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
dev = &rte_eth_devices[port_id];
@@ -2925,7 +2983,6 @@ rte_eth_add_rx_callback(uint8_t port_id, uint16_t queue_id,
rte_errno = EINVAL;
return NULL;
}
-
struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0);
if (cb == NULL) {
@@ -2936,6 +2993,7 @@ rte_eth_add_rx_callback(uint8_t port_id, uint16_t queue_id,
cb->fn.rx = fn;
cb->param = user_param;
+ rte_spinlock_lock(&rte_eth_rx_cb_lock);
/* Add the callbacks in fifo order. */
struct rte_eth_rxtx_callback *tail =
rte_eth_devices[port_id].post_rx_burst_cbs[queue_id];
@@ -2948,6 +3006,42 @@ rte_eth_add_rx_callback(uint8_t port_id, uint16_t queue_id,
tail = tail->next;
tail->next = cb;
}
+ rte_spinlock_unlock(&rte_eth_rx_cb_lock);
+
+ return cb;
+}
+
+void *
+rte_eth_add_first_rx_callback(uint8_t port_id, uint16_t queue_id,
+ rte_rx_callback_fn fn, void *user_param)
+{
+#ifndef RTE_ETHDEV_RXTX_CALLBACKS
+ rte_errno = ENOTSUP;
+ return NULL;
+#endif
+ /* check input parameters */
+ if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL ||
+ queue_id >= rte_eth_devices[port_id].data->nb_rx_queues) {
+ rte_errno = EINVAL;
+ return NULL;
+ }
+
+ struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0);
+
+ if (cb == NULL) {
+ rte_errno = ENOMEM;
+ return NULL;
+ }
+
+ cb->fn.rx = fn;
+ cb->param = user_param;
+
+ rte_spinlock_lock(&rte_eth_rx_cb_lock);
+ /* Add the callback at first position */
+ cb->next = rte_eth_devices[port_id].post_rx_burst_cbs[queue_id];
+ rte_smp_wmb();
+ rte_eth_devices[port_id].post_rx_burst_cbs[queue_id] = cb;
+ rte_spinlock_unlock(&rte_eth_rx_cb_lock);
return cb;
}
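rte_eth_add_first_rx_callback() differs from rte_eth_add_rx_callback() only in where the new entry lands: at the head of the per-queue list rather than the tail, so it runs before previously registered callbacks. A usage sketch (the mark_arrival callback, its use of udata64 and the port_id variable are illustrative assumptions):

    static uint16_t
    mark_arrival(uint8_t port, uint16_t queue, struct rte_mbuf **pkts,
                 uint16_t nb_pkts, uint16_t max_pkts, void *user_param)
    {
            uint64_t now = rte_rdtsc();
            uint16_t i;

            RTE_SET_USED(port);
            RTE_SET_USED(queue);
            RTE_SET_USED(max_pkts);
            RTE_SET_USED(user_param);
            for (i = 0; i < nb_pkts; i++)
                    pkts[i]->udata64 = now;  /* stamp arrival time */
            return nb_pkts;
    }

    /* in the setup path, once RX queue 0 exists: */
    void *cb = rte_eth_add_first_rx_callback(port_id, 0, mark_arrival, NULL);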
@@ -2977,6 +3071,7 @@ rte_eth_add_tx_callback(uint8_t port_id, uint16_t queue_id,
cb->fn.tx = fn;
cb->param = user_param;
+ rte_spinlock_lock(&rte_eth_tx_cb_lock);
/* Add the callbacks in fifo order. */
struct rte_eth_rxtx_callback *tail =
rte_eth_devices[port_id].pre_tx_burst_cbs[queue_id];
@@ -2989,6 +3084,7 @@ rte_eth_add_tx_callback(uint8_t port_id, uint16_t queue_id,
tail = tail->next;
tail->next = cb;
}
+ rte_spinlock_unlock(&rte_eth_tx_cb_lock);
return cb;
}
@@ -3001,35 +3097,30 @@ rte_eth_remove_rx_callback(uint8_t port_id, uint16_t queue_id,
return -ENOTSUP;
#endif
/* Check input parameters. */
- if (!rte_eth_dev_is_valid_port(port_id) || user_cb == NULL ||
- queue_id >= rte_eth_devices[port_id].data->nb_rx_queues) {
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
+ if (user_cb == NULL ||
+ queue_id >= rte_eth_devices[port_id].data->nb_rx_queues)
return -EINVAL;
- }
struct rte_eth_dev *dev = &rte_eth_devices[port_id];
- struct rte_eth_rxtx_callback *cb = dev->post_rx_burst_cbs[queue_id];
- struct rte_eth_rxtx_callback *prev_cb;
-
- /* Reset head pointer and remove user cb if first in the list. */
- if (cb == user_cb) {
- dev->post_rx_burst_cbs[queue_id] = user_cb->next;
- return 0;
- }
-
- /* Remove the user cb from the callback list. */
- do {
- prev_cb = cb;
- cb = cb->next;
-
+ struct rte_eth_rxtx_callback *cb;
+ struct rte_eth_rxtx_callback **prev_cb;
+ int ret = -EINVAL;
+
+ rte_spinlock_lock(&rte_eth_rx_cb_lock);
+ prev_cb = &dev->post_rx_burst_cbs[queue_id];
+ for (; *prev_cb != NULL; prev_cb = &cb->next) {
+ cb = *prev_cb;
if (cb == user_cb) {
- prev_cb->next = user_cb->next;
- return 0;
+ /* Remove the user cb from the callback list. */
+ *prev_cb = cb->next;
+ ret = 0;
+ break;
}
+ }
+ rte_spinlock_unlock(&rte_eth_rx_cb_lock);
- } while (cb != NULL);
-
- /* Callback wasn't found. */
- return -EINVAL;
+ return ret;
}
int
@@ -3040,35 +3131,30 @@ rte_eth_remove_tx_callback(uint8_t port_id, uint16_t queue_id,
return -ENOTSUP;
#endif
/* Check input parameters. */
- if (!rte_eth_dev_is_valid_port(port_id) || user_cb == NULL ||
- queue_id >= rte_eth_devices[port_id].data->nb_tx_queues) {
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
+ if (user_cb == NULL ||
+ queue_id >= rte_eth_devices[port_id].data->nb_tx_queues)
return -EINVAL;
- }
struct rte_eth_dev *dev = &rte_eth_devices[port_id];
- struct rte_eth_rxtx_callback *cb = dev->pre_tx_burst_cbs[queue_id];
- struct rte_eth_rxtx_callback *prev_cb;
-
- /* Reset head pointer and remove user cb if first in the list. */
- if (cb == user_cb) {
- dev->pre_tx_burst_cbs[queue_id] = user_cb->next;
- return 0;
- }
-
- /* Remove the user cb from the callback list. */
- do {
- prev_cb = cb;
- cb = cb->next;
-
+ int ret = -EINVAL;
+ struct rte_eth_rxtx_callback *cb;
+ struct rte_eth_rxtx_callback **prev_cb;
+
+ rte_spinlock_lock(&rte_eth_tx_cb_lock);
+ prev_cb = &dev->pre_tx_burst_cbs[queue_id];
+ for (; *prev_cb != NULL; prev_cb = &cb->next) {
+ cb = *prev_cb;
if (cb == user_cb) {
- prev_cb->next = user_cb->next;
- return 0;
+ /* Remove the user cb from the callback list. */
+ *prev_cb = cb->next;
+ ret = 0;
+ break;
}
+ }
+ rte_spinlock_unlock(&rte_eth_tx_cb_lock);
- } while (cb != NULL);
-
- /* Callback wasn't found. */
- return -EINVAL;
+ return ret;
}
int
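Even with the new spinlocks, the remove functions only unlink the entry; the memory is not freed and a concurrent rte_eth_rx_burst()/rte_eth_tx_burst() may still be walking the list. A cautious teardown therefore waits out in-flight bursts before releasing the callback. A sketch (my_rx_cb, port_id and queue_id are assumed; the grace period is application-specific and rte_delay_ms() is only a placeholder):

    void *cb = rte_eth_add_rx_callback(port_id, queue_id, my_rx_cb, NULL);
    /* ... later, when tearing the callback down ... */
    if (rte_eth_remove_rx_callback(port_id, queue_id, cb) == 0) {
            /* entry is unlinked but not freed; let data-plane threads drain */
            rte_delay_ms(1);
            rte_free(cb);
    }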
@@ -3284,10 +3370,7 @@ rte_eth_dev_get_dcb_info(uint8_t port_id,
{
struct rte_eth_dev *dev;
- if (!rte_eth_dev_is_valid_port(port_id)) {
- RTE_PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
- return -ENODEV;
- }
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
dev = &rte_eth_devices[port_id];
memset(dcb_info, 0, sizeof(struct rte_eth_dcb_info));
diff --git a/lib/librte_ether/rte_ethdev.h b/lib/librte_ether/rte_ethdev.h
index 022733ec..0f173231 100644
--- a/lib/librte_ether/rte_ethdev.h
+++ b/lib/librte_ether/rte_ethdev.h
@@ -102,7 +102,7 @@
* rte_eth_dev_configure(), rte_eth_tx_queue_setup(), or
* rte_eth_rx_queue_setup()), it must call rte_eth_dev_stop() first to stop the
* device and then do the reconfiguration before calling rte_eth_dev_start()
- * again. The tramsit and receive functions should not be invoked when the
+ * again. The transmit and receive functions should not be invoked when the
* device is stopped.
*
* Please note that some configuration is not stored between calls to
@@ -200,27 +200,9 @@ struct rte_eth_stats {
/**< Total of RX packets dropped by the HW,
* because there are no available mbufs (i.e. RX queues are full).
*/
- uint64_t ibadcrc __rte_deprecated;
- /**< Deprecated; Total of RX packets with CRC error. */
- uint64_t ibadlen __rte_deprecated;
- /**< Deprecated; Total of RX packets with bad length. */
uint64_t ierrors; /**< Total number of erroneous received packets. */
uint64_t oerrors; /**< Total number of failed transmitted packets. */
- uint64_t imcasts;
- /**< Deprecated; Total number of multicast received packets. */
uint64_t rx_nombuf; /**< Total number of RX mbuf allocation failures. */
- uint64_t fdirmatch __rte_deprecated;
- /**< Deprecated; Total number of RX packets matching a filter. */
- uint64_t fdirmiss __rte_deprecated;
- /**< Deprecated; Total number of RX packets not matching any filter. */
- uint64_t tx_pause_xon __rte_deprecated;
- /**< Deprecated; Total nb. of XON pause frame sent. */
- uint64_t rx_pause_xon __rte_deprecated;
- /**< Deprecated; Total nb. of XON pause frame received. */
- uint64_t tx_pause_xoff __rte_deprecated;
- /**< Deprecated; Total nb. of XOFF pause frame sent. */
- uint64_t rx_pause_xoff __rte_deprecated;
- /**< Deprecated; Total nb. of XOFF pause frame received. */
uint64_t q_ipackets[RTE_ETHDEV_QUEUE_STAT_CNTRS];
/**< Total number of queue RX packets. */
uint64_t q_opackets[RTE_ETHDEV_QUEUE_STAT_CNTRS];
@@ -231,14 +213,6 @@ struct rte_eth_stats {
/**< Total number of successfully transmitted queue bytes. */
uint64_t q_errors[RTE_ETHDEV_QUEUE_STAT_CNTRS];
/**< Total number of queue packets received that are dropped. */
- uint64_t ilbpackets;
- /**< Total number of good packets received from loopback,VF Only */
- uint64_t olbpackets;
- /**< Total number of good packets transmitted to loopback,VF Only */
- uint64_t ilbbytes;
- /**< Total number of good bytes received from loopback,VF Only */
- uint64_t olbbytes;
- /**< Total number of good bytes transmitted to loopback,VF Only */
};
/**
@@ -389,8 +363,8 @@ struct rte_eth_rxmode {
*/
enum rte_vlan_type {
ETH_VLAN_TYPE_UNKNOWN = 0,
- ETH_VLAN_TYPE_INNER, /**< Single VLAN, or inner VLAN. */
- ETH_VLAN_TYPE_OUTER, /**< Outer VLAN. */
+ ETH_VLAN_TYPE_INNER, /**< Inner VLAN. */
+ ETH_VLAN_TYPE_OUTER, /**< Single VLAN, or outer VLAN. */
ETH_VLAN_TYPE_MAX,
};
@@ -439,6 +413,10 @@ struct rte_eth_rss_conf {
#define ETH_RSS_IPV6_EX (1ULL << RTE_ETH_FLOW_IPV6_EX)
#define ETH_RSS_IPV6_TCP_EX (1ULL << RTE_ETH_FLOW_IPV6_TCP_EX)
#define ETH_RSS_IPV6_UDP_EX (1ULL << RTE_ETH_FLOW_IPV6_UDP_EX)
+#define ETH_RSS_PORT (1ULL << RTE_ETH_FLOW_PORT)
+#define ETH_RSS_VXLAN (1ULL << RTE_ETH_FLOW_VXLAN)
+#define ETH_RSS_GENEVE (1ULL << RTE_ETH_FLOW_GENEVE)
+#define ETH_RSS_NVGRE (1ULL << RTE_ETH_FLOW_NVGRE)
#define ETH_RSS_IP ( \
ETH_RSS_IPV4 | \
@@ -463,6 +441,12 @@ struct rte_eth_rss_conf {
ETH_RSS_NONFRAG_IPV4_SCTP | \
ETH_RSS_NONFRAG_IPV6_SCTP)
+#define ETH_RSS_TUNNEL ( \
+ ETH_RSS_VXLAN | \
+ ETH_RSS_GENEVE | \
+ ETH_RSS_NVGRE)
+
+
/**< Mask of valid RSS hash protocols */
#define ETH_RSS_PROTO_MASK ( \
ETH_RSS_IPV4 | \
@@ -480,7 +464,11 @@ struct rte_eth_rss_conf {
ETH_RSS_L2_PAYLOAD | \
ETH_RSS_IPV6_EX | \
ETH_RSS_IPV6_TCP_EX | \
- ETH_RSS_IPV6_UDP_EX)
+ ETH_RSS_IPV6_UDP_EX | \
+ ETH_RSS_PORT | \
+ ETH_RSS_VXLAN | \
+ ETH_RSS_GENEVE | \
+ ETH_RSS_NVGRE)
/*
* Definitions used for redirection table entry size.
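The new tunnel hash types are requested like any other ETH_RSS_* bit, through the RSS configuration passed at configure time, provided the PMD advertises them in dev_info.flow_type_rss_offloads. A sketch (port_id, nb_rxq and nb_txq are assumed):

    struct rte_eth_conf port_conf;

    memset(&port_conf, 0, sizeof(port_conf));
    port_conf.rxmode.mq_mode = ETH_MQ_RX_RSS;
    port_conf.rx_adv_conf.rss_conf.rss_key = NULL;  /* default key */
    port_conf.rx_adv_conf.rss_conf.rss_hf = ETH_RSS_IP | ETH_RSS_TUNNEL;

    if (rte_eth_dev_configure(port_id, nb_rxq, nb_txq, &port_conf) < 0)
            rte_exit(EXIT_FAILURE, "cannot configure port %d\n", port_id);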
@@ -489,6 +477,7 @@ struct rte_eth_rss_conf {
*/
#define ETH_RSS_RETA_SIZE_64 64
#define ETH_RSS_RETA_SIZE_128 128
+#define ETH_RSS_RETA_SIZE_256 256
#define ETH_RSS_RETA_SIZE_512 512
#define RTE_RETA_GROUP_SIZE 64
@@ -908,6 +897,9 @@ struct rte_eth_dev_info {
struct rte_eth_desc_lim rx_desc_lim; /**< RX descriptors limits */
struct rte_eth_desc_lim tx_desc_lim; /**< TX descriptors limits */
uint32_t speed_capa; /**< Supported speeds bitmap (ETH_LINK_SPEED_). */
+ /** Configured number of rx/tx queues */
+ uint16_t nb_rx_queues; /**< Number of RX queues. */
+ uint16_t nb_tx_queues; /**< Number of TX queues. */
};
/**
@@ -940,11 +932,21 @@ struct rte_eth_txq_info {
* statistics that are not provided in the generic rte_eth_stats
* structure.
*/
-struct rte_eth_xstats {
- char name[RTE_ETH_XSTATS_NAME_SIZE];
+struct rte_eth_xstat {
+ uint64_t id;
uint64_t value;
};
+/**
+ * A name-key lookup element for extended statistics.
+ *
+ * This structure is used to map between names and ID numbers
+ * for extended ethernet statistics.
+ */
+struct rte_eth_xstat_name {
+ char name[RTE_ETH_XSTATS_NAME_SIZE];
+};
+
#define ETH_DCB_NUM_TCS 8
#define ETH_MAX_VMDQ_POOL 64
@@ -1074,12 +1076,16 @@ typedef void (*eth_stats_reset_t)(struct rte_eth_dev *dev);
/**< @internal Reset global I/O statistics of an Ethernet device to 0. */
typedef int (*eth_xstats_get_t)(struct rte_eth_dev *dev,
- struct rte_eth_xstats *stats, unsigned n);
+ struct rte_eth_xstat *stats, unsigned n);
/**< @internal Get extended stats of an Ethernet device. */
typedef void (*eth_xstats_reset_t)(struct rte_eth_dev *dev);
/**< @internal Reset extended stats of an Ethernet device. */
+typedef int (*eth_xstats_get_names_t)(struct rte_eth_dev *dev,
+ struct rte_eth_xstat_name *xstats_names, unsigned size);
+/**< @internal Get names of extended stats of an Ethernet device. */
+
typedef int (*eth_queue_stats_mapping_set_t)(struct rte_eth_dev *dev,
uint16_t queue_id,
uint8_t stat_idx,
@@ -1150,7 +1156,7 @@ typedef int (*vlan_filter_set_t)(struct rte_eth_dev *dev,
typedef int (*vlan_tpid_set_t)(struct rte_eth_dev *dev,
enum rte_vlan_type type, uint16_t tpid);
-/**< @internal set the outer VLAN-TPID by an Ethernet device. */
+/**< @internal set the outer/inner VLAN-TPID by an Ethernet device. */
typedef void (*vlan_offload_set_t)(struct rte_eth_dev *dev, int mask);
/**< @internal set VLAN offload function by an Ethernet device. */
@@ -1427,6 +1433,8 @@ struct eth_dev_ops {
eth_stats_reset_t stats_reset; /**< Reset generic device statistics. */
eth_xstats_get_t xstats_get; /**< Get extended device statistics. */
eth_xstats_reset_t xstats_reset; /**< Reset extended device statistics. */
+ eth_xstats_get_names_t xstats_get_names;
+ /**< Get names of extended statistics. */
eth_queue_stats_mapping_set_t queue_stats_mapping_set;
/**< Configure per queue stat counter mapping. */
eth_dev_infos_get_t dev_infos_get; /**< Get device info. */
@@ -1434,7 +1442,7 @@ struct eth_dev_ops {
/**< Get packet types supported and identified by device*/
mtu_set_t mtu_set; /**< Set MTU. */
vlan_filter_set_t vlan_filter_set; /**< Filter VLAN Setup. */
- vlan_tpid_set_t vlan_tpid_set; /**< Outer VLAN TPID Setup. */
+ vlan_tpid_set_t vlan_tpid_set; /**< Outer/Inner VLAN TPID Setup. */
vlan_strip_queue_set_t vlan_strip_queue_set; /**< VLAN Stripping on queue. */
vlan_offload_set_t vlan_offload_set; /**< Set VLAN Offload. */
vlan_pvid_set_t vlan_pvid_set; /**< Set port based TX VLAN insertion */
@@ -1641,7 +1649,7 @@ struct rte_eth_dev {
struct rte_eth_rxtx_callback *pre_tx_burst_cbs[RTE_MAX_QUEUES_PER_PORT];
uint8_t attached; /**< Flag indicating the port is attached */
enum rte_eth_dev_type dev_type; /**< Flag indicating the device type */
-};
+} __rte_cache_aligned;
struct rte_eth_dev_sriov {
uint8_t active; /**< SRIOV is active with 16, 32 or 64 pools */
@@ -2015,7 +2023,7 @@ int rte_eth_tx_queue_setup(uint8_t port_id, uint16_t tx_queue_id,
uint16_t nb_tx_desc, unsigned int socket_id,
const struct rte_eth_txconf *tx_conf);
-/*
+/**
* Return the NUMA socket to which an Ethernet device is connected
*
* @param port_id
@@ -2027,7 +2035,7 @@ int rte_eth_tx_queue_setup(uint8_t port_id, uint16_t tx_queue_id,
*/
int rte_eth_dev_socket_id(uint8_t port_id);
-/*
+/**
* Check if port_id of device is attached
*
* @param port_id
@@ -2038,7 +2046,7 @@ int rte_eth_dev_socket_id(uint8_t port_id);
*/
int rte_eth_dev_is_valid_port(uint8_t port_id);
-/*
+/**
* Allocate mbuf from mempool, setup the DMA physical address
* and then start RX for specified queue of a port. It is used
* when rx_deferred_start flag of the specified queue is true.
@@ -2056,7 +2064,7 @@ int rte_eth_dev_is_valid_port(uint8_t port_id);
*/
int rte_eth_dev_rx_queue_start(uint8_t port_id, uint16_t rx_queue_id);
-/*
+/**
* Stop specified RX queue of a port
*
* @param port_id
@@ -2072,7 +2080,7 @@ int rte_eth_dev_rx_queue_start(uint8_t port_id, uint16_t rx_queue_id);
*/
int rte_eth_dev_rx_queue_stop(uint8_t port_id, uint16_t rx_queue_id);
-/*
+/**
* Start TX for specified queue of a port. It is used when tx_deferred_start
* flag of the specified queue is true.
*
@@ -2089,7 +2097,7 @@ int rte_eth_dev_rx_queue_stop(uint8_t port_id, uint16_t rx_queue_id);
*/
int rte_eth_dev_tx_queue_start(uint8_t port_id, uint16_t tx_queue_id);
-/*
+/**
* Stop specified TX queue of a port
*
* @param port_id
@@ -2279,13 +2287,36 @@ int rte_eth_stats_get(uint8_t port_id, struct rte_eth_stats *stats);
void rte_eth_stats_reset(uint8_t port_id);
/**
+ * Retrieve names of extended statistics of an Ethernet device.
+ *
+ * @param port_id
+ * The port identifier of the Ethernet device.
+ * @param xstats_names
+ * Block of memory to insert names into. Must be at least size in capacity.
+ * If set to NULL, function returns required capacity.
+ * @param size
+ * Capacity of xstats_names (number of names).
+ * @return
+ * - positive value lower or equal to size: success. The return value
+ * is the number of entries filled in the stats table.
+ * - positive value higher than size: error, the given statistics table
+ * is too small. The return value corresponds to the size that should
+ * be given to succeed. The entries in the table are not valid and
+ * shall not be used by the caller.
+ * - negative value on error (invalid port id)
+ */
+int rte_eth_xstats_get_names(uint8_t port_id,
+ struct rte_eth_xstat_name *xstats_names,
+ unsigned size);
+
+/**
* Retrieve extended statistics of an Ethernet device.
*
* @param port_id
* The port identifier of the Ethernet device.
* @param xstats
- * A pointer to a table of structure of type *rte_eth_xstats*
- * to be filled with device statistics names and values.
+ * A pointer to a table of structure of type *rte_eth_xstat*
+ * to be filled with device statistics ids and values.
* This parameter can be set to NULL if n is 0.
* @param n
* The size of the stats table, which should be large enough to store
@@ -2299,7 +2330,7 @@ void rte_eth_stats_reset(uint8_t port_id);
* shall not be used by the caller.
* - negative value on error (invalid port id)
*/
-int rte_eth_xstats_get(uint8_t port_id, struct rte_eth_xstats *xstats,
+int rte_eth_xstats_get(uint8_t port_id, struct rte_eth_xstat *xstats,
unsigned n);
/**
@@ -2376,6 +2407,21 @@ void rte_eth_dev_info_get(uint8_t port_id, struct rte_eth_dev_info *dev_info);
/**
* Retrieve the supported packet types of an Ethernet device.
*
+ * When a packet type is announced as supported, it *must* be recognized by
+ * the PMD. For instance, if RTE_PTYPE_L2_ETHER, RTE_PTYPE_L2_ETHER_VLAN
+ * and RTE_PTYPE_L3_IPV4 are announced, the PMD must return the following
+ * packet types for these packets:
+ * - Ether/IPv4 -> RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4
+ * - Ether/Vlan/IPv4 -> RTE_PTYPE_L2_ETHER_VLAN | RTE_PTYPE_L3_IPV4
+ * - Ether/[anything else] -> RTE_PTYPE_L2_ETHER
+ * - Ether/Vlan/[anything else] -> RTE_PTYPE_L2_ETHER_VLAN
+ *
+ * When a packet is received by a PMD, the most precise type must be
+ * returned among the ones supported. However a PMD is allowed to set
+ * packet type that is not in the supported list, at the condition that it
+ * is more precise. Therefore, a PMD announcing no supported packet types
+ * can still set a matching packet type in a received packet.
+ *
* @note
* Better to invoke this API after the device is already started or rx burst
* function is decided, to obtain correct supported ptypes.
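The contract spelled out above can be checked at runtime with rte_eth_dev_get_supported_ptypes(), which returns the full count of reported packet types even when the supplied array is too small. A sketch (port_id is assumed):

    uint32_t ptypes[32];
    int i, num;

    num = rte_eth_dev_get_supported_ptypes(port_id, RTE_PTYPE_ALL_MASK,
                                           ptypes, RTE_DIM(ptypes));
    if (num > (int)RTE_DIM(ptypes))
            num = RTE_DIM(ptypes);  /* only the first entries were filled */
    for (i = 0; i < num; i++)
            if (ptypes[i] == RTE_PTYPE_L3_IPV4)
                    printf("IPv4 recognized on port %d\n", port_id);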
@@ -2424,6 +2470,7 @@ int rte_eth_dev_get_mtu(uint8_t port_id, uint16_t *mtu);
* - (-ENOTSUP) if operation is not supported.
* - (-ENODEV) if *port_id* invalid.
* - (-EINVAL) if *mtu* invalid.
+ * - (-EBUSY) if operation is not allowed when the port is running
*/
int rte_eth_dev_set_mtu(uint8_t port_id, uint16_t mtu);
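With -EBUSY now documented, a driver that cannot change the MTU while the port is running reports that instead of silently misbehaving, and the caller can retry with the port stopped. A sketch (port_id and new_mtu are assumed):

    int ret = rte_eth_dev_set_mtu(port_id, new_mtu);

    if (ret == -EBUSY) {
            rte_eth_dev_stop(port_id);
            ret = rte_eth_dev_set_mtu(port_id, new_mtu);
            if (ret == 0)
                    ret = rte_eth_dev_start(port_id);
    }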
@@ -2709,7 +2756,8 @@ rte_eth_rx_descriptor_done(uint8_t port_id, uint16_t queue_id, uint16_t offset)
* on the output queue *queue_id* of the Ethernet device designated by its
* *port_id*.
* The *nb_pkts* parameter is the number of packets to send which are
- * supplied in the *tx_pkts* array of *rte_mbuf* structures.
+ * supplied in the *tx_pkts* array of *rte_mbuf* structures, each of them
+ * allocated from a pool created with rte_pktmbuf_pool_create().
* The rte_eth_tx_burst() function loops, sending *nb_pkts* packets,
* up to the number of transmit descriptors available in the TX ring of the
* transmit queue.
@@ -3851,6 +3899,34 @@ int rte_eth_dev_get_dcb_info(uint8_t port_id,
void *rte_eth_add_rx_callback(uint8_t port_id, uint16_t queue_id,
rte_rx_callback_fn fn, void *user_param);
+/**
+* Add a callback that must be called first on packet RX on a given port
+* and queue.
+*
+* This API configures a first function to be called for each burst of
+* packets received on a given NIC port queue. The return value is a pointer
+* that can be used to later remove the callback using
+* rte_eth_remove_rx_callback().
+*
+* Multiple functions are called in the order that they are added.
+*
+* @param port_id
+* The port identifier of the Ethernet device.
+* @param queue_id
+* The queue on the Ethernet device on which the callback is to be added.
+* @param fn
+* The callback function
+* @param user_param
+* A generic pointer parameter which will be passed to each invocation of the
+* callback function on this port and queue.
+*
+* @return
+* NULL on error.
+* On success, a pointer value which can later be used to remove the callback.
+*/
+void *rte_eth_add_first_rx_callback(uint8_t port_id, uint16_t queue_id,
+ rte_rx_callback_fn fn, void *user_param);
+
/**
* Add a callback to be called on packet TX on a given port and queue.
*
@@ -3984,7 +4060,7 @@ int rte_eth_rx_queue_info_get(uint8_t port_id, uint16_t queue_id,
int rte_eth_tx_queue_info_get(uint8_t port_id, uint16_t queue_id,
struct rte_eth_txq_info *qinfo);
-/*
+/**
* Retrieve number of available registers for access
*
* @param port_id
@@ -4279,6 +4355,35 @@ rte_eth_dev_l2_tunnel_offload_set(uint8_t port_id,
uint32_t mask,
uint8_t en);
+/**
+* Get the port id from pci adrress or device name
+* Ex: 0000:2:00.0 or vdev name eth_pcap0
+*
+* @param name
+* pci address or name of the device
+* @param port_id
+* pointer to port identifier of the device
+* @return
+* - (0) if successful.
+* - (-ENODEV or -EINVAL) on failure.
+*/
+int
+rte_eth_dev_get_port_by_name(const char *name, uint8_t *port_id);
+
+/**
+* Get the device name from port id
+*
+* @param port_id
+* port identifier of the device
+* @param name
+* buffer to receive the pci address or name of the device
+* @return
+* - (0) if successful.
+* - (-EINVAL) on failure.
+*/
+int
+rte_eth_dev_get_name_by_port(uint8_t port_id, char *name);
+
#ifdef __cplusplus
}
#endif
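The two newly exported helpers are symmetric: one resolves a PCI address or vdev name to a port id, the other recovers the name from a port id. A lookup sketch (the device string is only an example):

    uint8_t port_id;
    char name[RTE_ETH_NAME_MAX_LEN];

    if (rte_eth_dev_get_port_by_name("0000:02:00.0", &port_id) == 0 &&
        rte_eth_dev_get_name_by_port(port_id, name) == 0)
            printf("port %d <-> %s\n", port_id, name);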
diff --git a/lib/librte_ether/rte_ether_version.map b/lib/librte_ether/rte_ether_version.map
index 214ecc73..e1ccebe0 100644
--- a/lib/librte_ether/rte_ether_version.map
+++ b/lib/librte_ether/rte_ether_version.map
@@ -66,7 +66,6 @@ DPDK_2.2 {
rte_eth_dev_set_vf_rxmode;
rte_eth_dev_set_vf_tx;
rte_eth_dev_set_vf_vlan_filter;
- rte_eth_dev_set_vlan_ether_type;
rte_eth_dev_set_vlan_offload;
rte_eth_dev_set_vlan_pvid;
rte_eth_dev_set_vlan_strip_on_queue;
@@ -132,3 +131,12 @@ DPDK_16.04 {
rte_eth_tx_buffer_set_err_callback;
} DPDK_2.2;
+
+DPDK_16.07 {
+ global:
+
+ rte_eth_add_first_rx_callback;
+ rte_eth_dev_get_name_by_port;
+ rte_eth_dev_get_port_by_name;
+ rte_eth_xstats_get_names;
+} DPDK_16.04;