Diffstat (limited to 'lib/librte_eventdev')
-rw-r--r--  lib/librte_eventdev/Makefile                   |    5
-rw-r--r--  lib/librte_eventdev/rte_event_eth_rx_adapter.c | 1240
-rw-r--r--  lib/librte_eventdev/rte_event_eth_rx_adapter.h |  444
-rw-r--r--  lib/librte_eventdev/rte_eventdev.c             |  286
-rw-r--r--  lib/librte_eventdev/rte_eventdev.h             |  312
-rw-r--r--  lib/librte_eventdev/rte_eventdev_pmd.h         |  182
-rw-r--r--  lib/librte_eventdev/rte_eventdev_pmd_pci.h     |    1
-rw-r--r--  lib/librte_eventdev/rte_eventdev_pmd_vdev.h    |    2
-rw-r--r--  lib/librte_eventdev/rte_eventdev_version.map   |   27
9 files changed, 2257 insertions(+), 242 deletions(-)
diff --git a/lib/librte_eventdev/Makefile b/lib/librte_eventdev/Makefile
index 410578a1..5ac22cde 100644
--- a/lib/librte_eventdev/Makefile
+++ b/lib/librte_eventdev/Makefile
@@ -34,15 +34,17 @@ include $(RTE_SDK)/mk/rte.vars.mk
LIB = librte_eventdev.a
# library version
-LIBABIVER := 2
+LIBABIVER := 3
# build flags
CFLAGS += -O3
CFLAGS += $(WERROR_FLAGS)
+LDLIBS += -lrte_eal -lrte_ring -lrte_ethdev -lrte_hash
# library source files
SRCS-y += rte_eventdev.c
SRCS-y += rte_event_ring.c
+SRCS-y += rte_event_eth_rx_adapter.c
# export include files
SYMLINK-y-include += rte_eventdev.h
@@ -50,6 +52,7 @@ SYMLINK-y-include += rte_eventdev_pmd.h
SYMLINK-y-include += rte_eventdev_pmd_pci.h
SYMLINK-y-include += rte_eventdev_pmd_vdev.h
SYMLINK-y-include += rte_event_ring.h
+SYMLINK-y-include += rte_event_eth_rx_adapter.h
# versioning export map
EXPORT_MAP := rte_eventdev_version.map
diff --git a/lib/librte_eventdev/rte_event_eth_rx_adapter.c b/lib/librte_eventdev/rte_event_eth_rx_adapter.c
new file mode 100644
index 00000000..90106e6c
--- /dev/null
+++ b/lib/librte_eventdev/rte_event_eth_rx_adapter.c
@@ -0,0 +1,1240 @@
+#include <rte_cycles.h>
+#include <rte_common.h>
+#include <rte_dev.h>
+#include <rte_errno.h>
+#include <rte_ethdev.h>
+#include <rte_log.h>
+#include <rte_malloc.h>
+#include <rte_service_component.h>
+#include <rte_thash.h>
+
+#include "rte_eventdev.h"
+#include "rte_eventdev_pmd.h"
+#include "rte_event_eth_rx_adapter.h"
+
+#define BATCH_SIZE 32
+#define BLOCK_CNT_THRESHOLD 10
+#define ETH_EVENT_BUFFER_SIZE (4*BATCH_SIZE)
+
+#define ETH_RX_ADAPTER_SERVICE_NAME_LEN 32
+#define ETH_RX_ADAPTER_MEM_NAME_LEN 32
+
+#define RSS_KEY_SIZE 40
+
+/*
+ * There is an instance of this struct per polled Rx queue added to the
+ * adapter
+ */
+struct eth_rx_poll_entry {
+ /* Eth port to poll */
+ uint8_t eth_dev_id;
+ /* Eth rx queue to poll */
+ uint16_t eth_rx_qid;
+};
+
+/* Instance per adapter */
+struct rte_eth_event_enqueue_buffer {
+ /* Count of events in this buffer */
+ uint16_t count;
+ /* Array of events in this buffer */
+ struct rte_event events[ETH_EVENT_BUFFER_SIZE];
+};
+
+struct rte_event_eth_rx_adapter {
+ /* RSS key */
+ uint8_t rss_key_be[RSS_KEY_SIZE];
+ /* Event device identifier */
+ uint8_t eventdev_id;
+ /* Per ethernet device structure */
+ struct eth_device_info *eth_devices;
+ /* Event port identifier */
+ uint8_t event_port_id;
+ /* Lock to serialize config updates with service function */
+ rte_spinlock_t rx_lock;
+ /* Max mbufs processed in any service function invocation */
+ uint32_t max_nb_rx;
+ /* Receive queues that need to be polled */
+ struct eth_rx_poll_entry *eth_rx_poll;
+ /* Size of the eth_rx_poll array */
+ uint16_t num_rx_polled;
+ /* Weighted round robin schedule */
+ uint32_t *wrr_sched;
+ /* wrr_sched[] size */
+ uint32_t wrr_len;
+ /* Next entry in wrr[] to begin polling */
+ uint32_t wrr_pos;
+ /* Event burst buffer */
+ struct rte_eth_event_enqueue_buffer event_enqueue_buffer;
+ /* Per adapter stats */
+ struct rte_event_eth_rx_adapter_stats stats;
+ /* Block count, counts up to BLOCK_CNT_THRESHOLD */
+ uint16_t enq_block_count;
+ /* Block start ts */
+ uint64_t rx_enq_block_start_ts;
+ /* Configuration callback for rte_service configuration */
+ rte_event_eth_rx_adapter_conf_cb conf_cb;
+ /* Configuration callback argument */
+ void *conf_arg;
+ /* Set if default_cb is being used */
+ int default_cb_arg;
+ /* Service initialization state */
+ uint8_t service_inited;
+ /* Total count of Rx queues in adapter */
+ uint32_t nb_queues;
+ /* Memory allocation name */
+ char mem_name[ETH_RX_ADAPTER_MEM_NAME_LEN];
+ /* Socket identifier cached from eventdev */
+ int socket_id;
+ /* Per adapter EAL service */
+ uint32_t service_id;
+} __rte_cache_aligned;
+
+/* Per eth device */
+struct eth_device_info {
+ struct rte_eth_dev *dev;
+ struct eth_rx_queue_info *rx_queue;
+ /* Set if ethdev->eventdev packet transfer uses a
+ * hardware mechanism
+ */
+ uint8_t internal_event_port;
+ /* Set if the adapter is processing Rx queues for
+ * this eth device and packet processing has been
+ * started; this lets the code know whether the PMD
+ * rx_adapter_stop callback needs to be invoked.
+ */
+ uint8_t dev_rx_started;
+ /* If nb_dev_queues > 0, the start callback will
+ * be invoked if not already invoked
+ */
+ uint16_t nb_dev_queues;
+};
+
+/* Per Rx queue */
+struct eth_rx_queue_info {
+ int queue_enabled; /* True if added */
+ uint16_t wt; /* Polling weight */
+ uint8_t event_queue_id; /* Event queue to enqueue packets to */
+ uint8_t sched_type; /* Sched type for events */
+ uint8_t priority; /* Event priority */
+ uint32_t flow_id; /* App provided flow identifier */
+ uint32_t flow_id_mask; /* Set to ~0 if app provides flow id else 0 */
+};
+
+static struct rte_event_eth_rx_adapter **event_eth_rx_adapter;
+
+static inline int
+valid_id(uint8_t id)
+{
+ return id < RTE_EVENT_ETH_RX_ADAPTER_MAX_INSTANCE;
+}
+
+#define RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, retval) do { \
+ if (!valid_id(id)) { \
+ RTE_EDEV_LOG_ERR("Invalid eth Rx adapter id = %d", id); \
+ return retval; \
+ } \
+} while (0)
+
+static inline int
+sw_rx_adapter_queue_count(struct rte_event_eth_rx_adapter *rx_adapter)
+{
+ return rx_adapter->num_rx_polled;
+}
+
+/* Greatest common divisor */
+static uint16_t gcd_u16(uint16_t a, uint16_t b)
+{
+ uint16_t r = a % b;
+
+ return r ? gcd_u16(b, r) : b;
+}
+
+/* Returns the next queue in the polling sequence
+ *
+ * http://kb.linuxvirtualserver.org/wiki/Weighted_Round-Robin_Scheduling
+ */
+static int
+wrr_next(struct rte_event_eth_rx_adapter *rx_adapter,
+ unsigned int n, int *cw,
+ struct eth_rx_poll_entry *eth_rx_poll, uint16_t max_wt,
+ uint16_t gcd, int prev)
+{
+ int i = prev;
+ uint16_t w;
+
+ while (1) {
+ uint16_t q;
+ uint8_t d;
+
+ i = (i + 1) % n;
+ if (i == 0) {
+ *cw = *cw - gcd;
+ if (*cw <= 0)
+ *cw = max_wt;
+ }
+
+ q = eth_rx_poll[i].eth_rx_qid;
+ d = eth_rx_poll[i].eth_dev_id;
+ w = rx_adapter->eth_devices[d].rx_queue[q].wt;
+
+ if ((int)w >= *cw)
+ return i;
+ }
+}
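+
+/* As an illustrative example (not part of the adapter state): for three
+ * queues with weights {4, 2, 1}, max_wt = 4 and gcd = 1, repeated calls to
+ * wrr_next() return the sequence {0, 0, 0, 1, 0, 1, 2}, i.e. each queue
+ * appears in proportion to its weight across the 4 + 2 + 1 = 7 positions
+ * precomputed by eth_poll_wrr_calc() below.
+ */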
+
+/* Precalculate WRR polling sequence for all queues in rx_adapter */
+static int
+eth_poll_wrr_calc(struct rte_event_eth_rx_adapter *rx_adapter)
+{
+ uint8_t d;
+ uint16_t q;
+ unsigned int i;
+
+ /* Initialize variables for calculation of wrr schedule */
+ uint16_t max_wrr_pos = 0;
+ unsigned int poll_q = 0;
+ uint16_t max_wt = 0;
+ uint16_t gcd = 0;
+
+ struct eth_rx_poll_entry *rx_poll = NULL;
+ uint32_t *rx_wrr = NULL;
+
+ if (rx_adapter->num_rx_polled) {
+ size_t len = RTE_ALIGN(rx_adapter->num_rx_polled *
+ sizeof(*rx_adapter->eth_rx_poll),
+ RTE_CACHE_LINE_SIZE);
+ rx_poll = rte_zmalloc_socket(rx_adapter->mem_name,
+ len,
+ RTE_CACHE_LINE_SIZE,
+ rx_adapter->socket_id);
+ if (rx_poll == NULL)
+ return -ENOMEM;
+
+ /* Generate array of all queues to poll, the size of this
+ * array is poll_q
+ */
+ for (d = 0; d < rte_eth_dev_count(); d++) {
+ uint16_t nb_rx_queues;
+ struct eth_device_info *dev_info =
+ &rx_adapter->eth_devices[d];
+ nb_rx_queues = dev_info->dev->data->nb_rx_queues;
+ if (dev_info->rx_queue == NULL)
+ continue;
+ for (q = 0; q < nb_rx_queues; q++) {
+ struct eth_rx_queue_info *queue_info =
+ &dev_info->rx_queue[q];
+ if (queue_info->queue_enabled == 0)
+ continue;
+
+ uint16_t wt = queue_info->wt;
+ rx_poll[poll_q].eth_dev_id = d;
+ rx_poll[poll_q].eth_rx_qid = q;
+ max_wrr_pos += wt;
+ max_wt = RTE_MAX(max_wt, wt);
+ gcd = (gcd) ? gcd_u16(gcd, wt) : wt;
+ poll_q++;
+ }
+ }
+
+ len = RTE_ALIGN(max_wrr_pos * sizeof(*rx_wrr),
+ RTE_CACHE_LINE_SIZE);
+ rx_wrr = rte_zmalloc_socket(rx_adapter->mem_name,
+ len,
+ RTE_CACHE_LINE_SIZE,
+ rx_adapter->socket_id);
+ if (rx_wrr == NULL) {
+ rte_free(rx_poll);
+ return -ENOMEM;
+ }
+
+ /* Generate polling sequence based on weights */
+ int prev = -1;
+ int cw = -1;
+ for (i = 0; i < max_wrr_pos; i++) {
+ rx_wrr[i] = wrr_next(rx_adapter, poll_q, &cw,
+ rx_poll, max_wt, gcd, prev);
+ prev = rx_wrr[i];
+ }
+ }
+
+ rte_free(rx_adapter->eth_rx_poll);
+ rte_free(rx_adapter->wrr_sched);
+
+ rx_adapter->eth_rx_poll = rx_poll;
+ rx_adapter->wrr_sched = rx_wrr;
+ rx_adapter->wrr_len = max_wrr_pos;
+
+ return 0;
+}
+
+static inline void
+mtoip(struct rte_mbuf *m, struct ipv4_hdr **ipv4_hdr,
+ struct ipv6_hdr **ipv6_hdr)
+{
+ struct ether_hdr *eth_hdr = rte_pktmbuf_mtod(m, struct ether_hdr *);
+ struct vlan_hdr *vlan_hdr;
+
+ *ipv4_hdr = NULL;
+ *ipv6_hdr = NULL;
+
+ switch (eth_hdr->ether_type) {
+ case RTE_BE16(ETHER_TYPE_IPv4):
+ *ipv4_hdr = (struct ipv4_hdr *)(eth_hdr + 1);
+ break;
+
+ case RTE_BE16(ETHER_TYPE_IPv6):
+ *ipv6_hdr = (struct ipv6_hdr *)(eth_hdr + 1);
+ break;
+
+ case RTE_BE16(ETHER_TYPE_VLAN):
+ vlan_hdr = (struct vlan_hdr *)(eth_hdr + 1);
+ switch (vlan_hdr->eth_proto) {
+ case RTE_BE16(ETHER_TYPE_IPv4):
+ *ipv4_hdr = (struct ipv4_hdr *)(vlan_hdr + 1);
+ break;
+ case RTE_BE16(ETHER_TYPE_IPv6):
+ *ipv6_hdr = (struct ipv6_hdr *)(vlan_hdr + 1);
+ break;
+ default:
+ break;
+ }
+ break;
+
+ default:
+ break;
+ }
+}
+
+/* Calculate RSS hash for IPv4/6 */
+static inline uint32_t
+do_softrss(struct rte_mbuf *m, const uint8_t *rss_key_be)
+{
+ uint32_t input_len;
+ void *tuple;
+ struct rte_ipv4_tuple ipv4_tuple;
+ struct rte_ipv6_tuple ipv6_tuple;
+ struct ipv4_hdr *ipv4_hdr;
+ struct ipv6_hdr *ipv6_hdr;
+
+ mtoip(m, &ipv4_hdr, &ipv6_hdr);
+
+ if (ipv4_hdr) {
+ ipv4_tuple.src_addr = rte_be_to_cpu_32(ipv4_hdr->src_addr);
+ ipv4_tuple.dst_addr = rte_be_to_cpu_32(ipv4_hdr->dst_addr);
+ tuple = &ipv4_tuple;
+ input_len = RTE_THASH_V4_L3_LEN;
+ } else if (ipv6_hdr) {
+ rte_thash_load_v6_addrs(ipv6_hdr,
+ (union rte_thash_tuple *)&ipv6_tuple);
+ tuple = &ipv6_tuple;
+ input_len = RTE_THASH_V6_L3_LEN;
+ } else
+ return 0;
+
+ return rte_softrss_be(tuple, input_len, rss_key_be);
+}
+
+static inline int
+rx_enq_blocked(struct rte_event_eth_rx_adapter *rx_adapter)
+{
+ return !!rx_adapter->enq_block_count;
+}
+
+static inline void
+rx_enq_block_start_ts(struct rte_event_eth_rx_adapter *rx_adapter)
+{
+ if (rx_adapter->rx_enq_block_start_ts)
+ return;
+
+ rx_adapter->enq_block_count++;
+ if (rx_adapter->enq_block_count < BLOCK_CNT_THRESHOLD)
+ return;
+
+ rx_adapter->rx_enq_block_start_ts = rte_get_tsc_cycles();
+}
+
+static inline void
+rx_enq_block_end_ts(struct rte_event_eth_rx_adapter *rx_adapter,
+ struct rte_event_eth_rx_adapter_stats *stats)
+{
+ if (unlikely(!stats->rx_enq_start_ts))
+ stats->rx_enq_start_ts = rte_get_tsc_cycles();
+
+ if (likely(!rx_enq_blocked(rx_adapter)))
+ return;
+
+ rx_adapter->enq_block_count = 0;
+ if (rx_adapter->rx_enq_block_start_ts) {
+ stats->rx_enq_end_ts = rte_get_tsc_cycles();
+ stats->rx_enq_block_cycles += stats->rx_enq_end_ts -
+ rx_adapter->rx_enq_block_start_ts;
+ rx_adapter->rx_enq_block_start_ts = 0;
+ }
+}
+
+/* Add event to buffer, free space check is done prior to calling
+ * this function
+ */
+static inline void
+buf_event_enqueue(struct rte_event_eth_rx_adapter *rx_adapter,
+ struct rte_event *ev)
+{
+ struct rte_eth_event_enqueue_buffer *buf =
+ &rx_adapter->event_enqueue_buffer;
+ rte_memcpy(&buf->events[buf->count++], ev, sizeof(struct rte_event));
+}
+
+/* Enqueue buffered events to event device */
+static inline uint16_t
+flush_event_buffer(struct rte_event_eth_rx_adapter *rx_adapter)
+{
+ struct rte_eth_event_enqueue_buffer *buf =
+ &rx_adapter->event_enqueue_buffer;
+ struct rte_event_eth_rx_adapter_stats *stats = &rx_adapter->stats;
+
+ uint16_t n = rte_event_enqueue_new_burst(rx_adapter->eventdev_id,
+ rx_adapter->event_port_id,
+ buf->events,
+ buf->count);
+ if (n != buf->count) {
+ memmove(buf->events,
+ &buf->events[n],
+ (buf->count - n) * sizeof(struct rte_event));
+ stats->rx_enq_retry++;
+ }
+
+ n ? rx_enq_block_end_ts(rx_adapter, stats) :
+ rx_enq_block_start_ts(rx_adapter);
+
+ buf->count -= n;
+ stats->rx_enq_count += n;
+
+ return n;
+}
+
+static inline void
+fill_event_buffer(struct rte_event_eth_rx_adapter *rx_adapter,
+ uint8_t dev_id,
+ uint16_t rx_queue_id,
+ struct rte_mbuf **mbufs,
+ uint16_t num)
+{
+ uint32_t i;
+ struct eth_device_info *eth_device_info =
+ &rx_adapter->eth_devices[dev_id];
+ struct eth_rx_queue_info *eth_rx_queue_info =
+ &eth_device_info->rx_queue[rx_queue_id];
+
+ int32_t qid = eth_rx_queue_info->event_queue_id;
+ uint8_t sched_type = eth_rx_queue_info->sched_type;
+ uint8_t priority = eth_rx_queue_info->priority;
+ uint32_t flow_id;
+ struct rte_event events[BATCH_SIZE];
+ struct rte_mbuf *m = mbufs[0];
+ uint32_t rss_mask;
+ uint32_t rss;
+ int do_rss;
+
+ /* 0xffff ffff if PKT_RX_RSS_HASH is set, otherwise 0 */
+ rss_mask = ~(((m->ol_flags & PKT_RX_RSS_HASH) != 0) - 1);
+ do_rss = !rss_mask && !eth_rx_queue_info->flow_id_mask;
+
+ for (i = 0; i < num; i++) {
+ m = mbufs[i];
+ struct rte_event *ev = &events[i];
+
+ rss = do_rss ?
+ do_softrss(m, rx_adapter->rss_key_be) : m->hash.rss;
+ flow_id =
+ eth_rx_queue_info->flow_id &
+ eth_rx_queue_info->flow_id_mask;
+ flow_id |= rss & ~eth_rx_queue_info->flow_id_mask;
+
+ ev->flow_id = flow_id;
+ ev->op = RTE_EVENT_OP_NEW;
+ ev->sched_type = sched_type;
+ ev->queue_id = qid;
+ ev->event_type = RTE_EVENT_TYPE_ETH_RX_ADAPTER;
+ ev->sub_event_type = 0;
+ ev->priority = priority;
+ ev->mbuf = m;
+
+ buf_event_enqueue(rx_adapter, ev);
+ }
+}
+
+/*
+ * Polls receive queues added to the event adapter and enqueues received
+ * packets to the event device.
+ *
+ * The receive code enqueues initially to a temporary buffer; the
+ * temporary buffer is drained any time it holds >= BATCH_SIZE packets.
+ *
+ * If there isn't space available in the temporary buffer, packets from the
+ * Rx queue aren't dequeued from the eth device; this back pressures the
+ * eth device. In virtual device environments, this back pressure is relayed
+ * to the hypervisor's switching layer, where adjustments can be made to
+ * deal with it.
+ */
+static inline uint32_t
+eth_rx_poll(struct rte_event_eth_rx_adapter *rx_adapter)
+{
+ uint32_t num_queue;
+ uint16_t n;
+ uint32_t nb_rx = 0;
+ struct rte_mbuf *mbufs[BATCH_SIZE];
+ struct rte_eth_event_enqueue_buffer *buf;
+ uint32_t wrr_pos;
+ uint32_t max_nb_rx;
+
+ wrr_pos = rx_adapter->wrr_pos;
+ max_nb_rx = rx_adapter->max_nb_rx;
+ buf = &rx_adapter->event_enqueue_buffer;
+ struct rte_event_eth_rx_adapter_stats *stats = &rx_adapter->stats;
+
+ /* Iterate through a WRR sequence */
+ for (num_queue = 0; num_queue < rx_adapter->wrr_len; num_queue++) {
+ unsigned int poll_idx = rx_adapter->wrr_sched[wrr_pos];
+ uint16_t qid = rx_adapter->eth_rx_poll[poll_idx].eth_rx_qid;
+ uint8_t d = rx_adapter->eth_rx_poll[poll_idx].eth_dev_id;
+
+ /* Don't do a batch dequeue from the rx queue if there isn't
+ * enough space in the enqueue buffer.
+ */
+ if (buf->count >= BATCH_SIZE)
+ flush_event_buffer(rx_adapter);
+ if (BATCH_SIZE > (ETH_EVENT_BUFFER_SIZE - buf->count))
+ break;
+
+ stats->rx_poll_count++;
+ n = rte_eth_rx_burst(d, qid, mbufs, BATCH_SIZE);
+
+ if (n) {
+ stats->rx_packets += n;
+ /* The check before rte_eth_rx_burst() ensures that
+ * all n mbufs can be buffered
+ */
+ fill_event_buffer(rx_adapter, d, qid, mbufs, n);
+ nb_rx += n;
+ if (nb_rx > max_nb_rx) {
+ rx_adapter->wrr_pos =
+ (wrr_pos + 1) % rx_adapter->wrr_len;
+ return nb_rx;
+ }
+ }
+
+ if (++wrr_pos == rx_adapter->wrr_len)
+ wrr_pos = 0;
+ }
+
+ return nb_rx;
+}
+
+static int
+event_eth_rx_adapter_service_func(void *args)
+{
+ struct rte_event_eth_rx_adapter *rx_adapter = args;
+ struct rte_eth_event_enqueue_buffer *buf;
+
+ buf = &rx_adapter->event_enqueue_buffer;
+ if (rte_spinlock_trylock(&rx_adapter->rx_lock) == 0)
+ return 0;
+ if (eth_rx_poll(rx_adapter) == 0 && buf->count)
+ flush_event_buffer(rx_adapter);
+ rte_spinlock_unlock(&rx_adapter->rx_lock);
+ return 0;
+}
+
+static int
+rte_event_eth_rx_adapter_init(void)
+{
+ const char *name = "rte_event_eth_rx_adapter_array";
+ const struct rte_memzone *mz;
+ unsigned int sz;
+
+ sz = sizeof(*event_eth_rx_adapter) *
+ RTE_EVENT_ETH_RX_ADAPTER_MAX_INSTANCE;
+ sz = RTE_ALIGN(sz, RTE_CACHE_LINE_SIZE);
+
+ mz = rte_memzone_lookup(name);
+ if (mz == NULL) {
+ mz = rte_memzone_reserve_aligned(name, sz, rte_socket_id(), 0,
+ RTE_CACHE_LINE_SIZE);
+ if (mz == NULL) {
+ RTE_EDEV_LOG_ERR("failed to reserve memzone err = %"
+ PRId32, rte_errno);
+ return -rte_errno;
+ }
+ }
+
+ event_eth_rx_adapter = mz->addr;
+ return 0;
+}
+
+static inline struct rte_event_eth_rx_adapter *
+id_to_rx_adapter(uint8_t id)
+{
+ return event_eth_rx_adapter ?
+ event_eth_rx_adapter[id] : NULL;
+}
+
+static int
+default_conf_cb(uint8_t id, uint8_t dev_id,
+ struct rte_event_eth_rx_adapter_conf *conf, void *arg)
+{
+ int ret;
+ struct rte_eventdev *dev;
+ struct rte_event_dev_config dev_conf;
+ int started;
+ uint8_t port_id;
+ struct rte_event_port_conf *port_conf = arg;
+ struct rte_event_eth_rx_adapter *rx_adapter = id_to_rx_adapter(id);
+
+ dev = &rte_eventdevs[rx_adapter->eventdev_id];
+ dev_conf = dev->data->dev_conf;
+
+ started = dev->data->dev_started;
+ if (started)
+ rte_event_dev_stop(dev_id);
+ port_id = dev_conf.nb_event_ports;
+ dev_conf.nb_event_ports += 1;
+ ret = rte_event_dev_configure(dev_id, &dev_conf);
+ if (ret) {
+ RTE_EDEV_LOG_ERR("failed to configure event dev %u",
+ dev_id);
+ if (started)
+ rte_event_dev_start(dev_id);
+ return ret;
+ }
+
+ ret = rte_event_port_setup(dev_id, port_id, port_conf);
+ if (ret) {
+ RTE_EDEV_LOG_ERR("failed to setup event port %u",
+ port_id);
+ return ret;
+ }
+
+ conf->event_port_id = port_id;
+ conf->max_nb_rx = 128;
+ if (started)
+ rte_event_dev_start(dev_id);
+ rx_adapter->default_cb_arg = 1;
+ return ret;
+}
+
+static int
+init_service(struct rte_event_eth_rx_adapter *rx_adapter, uint8_t id)
+{
+ int ret;
+ struct rte_service_spec service;
+ struct rte_event_eth_rx_adapter_conf rx_adapter_conf;
+
+ if (rx_adapter->service_inited)
+ return 0;
+
+ memset(&service, 0, sizeof(service));
+ snprintf(service.name, ETH_RX_ADAPTER_SERVICE_NAME_LEN,
+ "rte_event_eth_rx_adapter_%d", id);
+ service.socket_id = rx_adapter->socket_id;
+ service.callback = event_eth_rx_adapter_service_func;
+ service.callback_userdata = rx_adapter;
+ /* Service function handles locking for queue add/del updates */
+ service.capabilities = RTE_SERVICE_CAP_MT_SAFE;
+ ret = rte_service_component_register(&service, &rx_adapter->service_id);
+ if (ret) {
+ RTE_EDEV_LOG_ERR("failed to register service %s err = %" PRId32,
+ service.name, ret);
+ return ret;
+ }
+
+ ret = rx_adapter->conf_cb(id, rx_adapter->eventdev_id,
+ &rx_adapter_conf, rx_adapter->conf_arg);
+ if (ret) {
+ RTE_EDEV_LOG_ERR("configuration callback failed err = %" PRId32,
+ ret);
+ goto err_done;
+ }
+ rx_adapter->event_port_id = rx_adapter_conf.event_port_id;
+ rx_adapter->max_nb_rx = rx_adapter_conf.max_nb_rx;
+ rx_adapter->service_inited = 1;
+ return 0;
+
+err_done:
+ rte_service_component_unregister(rx_adapter->service_id);
+ return ret;
+}
+
+
+static void
+update_queue_info(struct rte_event_eth_rx_adapter *rx_adapter,
+ struct eth_device_info *dev_info,
+ int32_t rx_queue_id,
+ uint8_t add)
+{
+ struct eth_rx_queue_info *queue_info;
+ int enabled;
+ uint16_t i;
+
+ if (dev_info->rx_queue == NULL)
+ return;
+
+ if (rx_queue_id == -1) {
+ for (i = 0; i < dev_info->dev->data->nb_rx_queues; i++)
+ update_queue_info(rx_adapter, dev_info, i, add);
+ } else {
+ queue_info = &dev_info->rx_queue[rx_queue_id];
+ enabled = queue_info->queue_enabled;
+ if (add) {
+ rx_adapter->nb_queues += !enabled;
+ dev_info->nb_dev_queues += !enabled;
+ } else {
+ rx_adapter->nb_queues -= enabled;
+ dev_info->nb_dev_queues -= enabled;
+ }
+ queue_info->queue_enabled = !!add;
+ }
+}
+
+static int
+event_eth_rx_adapter_queue_del(struct rte_event_eth_rx_adapter *rx_adapter,
+ struct eth_device_info *dev_info,
+ uint16_t rx_queue_id)
+{
+ struct eth_rx_queue_info *queue_info;
+
+ if (rx_adapter->nb_queues == 0)
+ return 0;
+
+ queue_info = &dev_info->rx_queue[rx_queue_id];
+ rx_adapter->num_rx_polled -= queue_info->queue_enabled;
+ update_queue_info(rx_adapter, dev_info, rx_queue_id, 0);
+ return 0;
+}
+
+static void
+event_eth_rx_adapter_queue_add(struct rte_event_eth_rx_adapter *rx_adapter,
+ struct eth_device_info *dev_info,
+ uint16_t rx_queue_id,
+ const struct rte_event_eth_rx_adapter_queue_conf *conf)
+
+{
+ struct eth_rx_queue_info *queue_info;
+ const struct rte_event *ev = &conf->ev;
+
+ queue_info = &dev_info->rx_queue[rx_queue_id];
+ queue_info->event_queue_id = ev->queue_id;
+ queue_info->sched_type = ev->sched_type;
+ queue_info->priority = ev->priority;
+ queue_info->wt = conf->servicing_weight;
+
+ if (conf->rx_queue_flags &
+ RTE_EVENT_ETH_RX_ADAPTER_QUEUE_FLOW_ID_VALID) {
+ queue_info->flow_id = ev->flow_id;
+ queue_info->flow_id_mask = ~0;
+ }
+
+ /* The same queue can be added more than once */
+ rx_adapter->num_rx_polled += !queue_info->queue_enabled;
+ update_queue_info(rx_adapter, dev_info, rx_queue_id, 1);
+}
+
+static int add_rx_queue(struct rte_event_eth_rx_adapter *rx_adapter,
+ uint8_t eth_dev_id,
+ int rx_queue_id,
+ const struct rte_event_eth_rx_adapter_queue_conf *queue_conf)
+{
+ struct eth_device_info *dev_info = &rx_adapter->eth_devices[eth_dev_id];
+ struct rte_event_eth_rx_adapter_queue_conf temp_conf;
+ uint32_t i;
+ int ret;
+
+ if (queue_conf->servicing_weight == 0) {
+
+ struct rte_eth_dev_data *data = dev_info->dev->data;
+ if (data->dev_conf.intr_conf.rxq) {
+ RTE_EDEV_LOG_ERR("Interrupt driven queues"
+ " not supported");
+ return -ENOTSUP;
+ }
+ temp_conf = *queue_conf;
+
+ /* If Rx interrupts are disabled set wt = 1 */
+ temp_conf.servicing_weight = 1;
+ queue_conf = &temp_conf;
+ }
+
+ if (dev_info->rx_queue == NULL) {
+ dev_info->rx_queue =
+ rte_zmalloc_socket(rx_adapter->mem_name,
+ dev_info->dev->data->nb_rx_queues *
+ sizeof(struct eth_rx_queue_info), 0,
+ rx_adapter->socket_id);
+ if (dev_info->rx_queue == NULL)
+ return -ENOMEM;
+ }
+
+ if (rx_queue_id == -1) {
+ for (i = 0; i < dev_info->dev->data->nb_rx_queues; i++)
+ event_eth_rx_adapter_queue_add(rx_adapter,
+ dev_info, i,
+ queue_conf);
+ } else {
+ event_eth_rx_adapter_queue_add(rx_adapter, dev_info,
+ (uint16_t)rx_queue_id,
+ queue_conf);
+ }
+
+ ret = eth_poll_wrr_calc(rx_adapter);
+ if (ret) {
+ event_eth_rx_adapter_queue_del(rx_adapter,
+ dev_info, rx_queue_id);
+ return ret;
+ }
+
+ return ret;
+}
+
+static int
+rx_adapter_ctrl(uint8_t id, int start)
+{
+ struct rte_event_eth_rx_adapter *rx_adapter;
+ struct rte_eventdev *dev;
+ struct eth_device_info *dev_info;
+ uint32_t i;
+ int use_service = 0;
+ int stop = !start;
+
+ RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
+ rx_adapter = id_to_rx_adapter(id);
+ if (rx_adapter == NULL)
+ return -EINVAL;
+
+ dev = &rte_eventdevs[rx_adapter->eventdev_id];
+
+ for (i = 0; i < rte_eth_dev_count(); i++) {
+ dev_info = &rx_adapter->eth_devices[i];
+ /* if start check for num dev queues */
+ if (start && !dev_info->nb_dev_queues)
+ continue;
+ /* if stop check if dev has been started */
+ if (stop && !dev_info->dev_rx_started)
+ continue;
+ use_service |= !dev_info->internal_event_port;
+ dev_info->dev_rx_started = start;
+ if (dev_info->internal_event_port == 0)
+ continue;
+ start ? (*dev->dev_ops->eth_rx_adapter_start)(dev,
+ &rte_eth_devices[i]) :
+ (*dev->dev_ops->eth_rx_adapter_stop)(dev,
+ &rte_eth_devices[i]);
+ }
+
+ if (use_service)
+ rte_service_runstate_set(rx_adapter->service_id, start);
+
+ return 0;
+}
+
+int
+rte_event_eth_rx_adapter_create_ext(uint8_t id, uint8_t dev_id,
+ rte_event_eth_rx_adapter_conf_cb conf_cb,
+ void *conf_arg)
+{
+ struct rte_event_eth_rx_adapter *rx_adapter;
+ int ret;
+ int socket_id;
+ uint8_t i;
+ char mem_name[ETH_RX_ADAPTER_SERVICE_NAME_LEN];
+ const uint8_t default_rss_key[] = {
+ 0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2,
+ 0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0,
+ 0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b, 0x30, 0xb4,
+ 0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30, 0xf2, 0x0c,
+ 0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa,
+ };
+
+ RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
+ RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
+ if (conf_cb == NULL)
+ return -EINVAL;
+
+ if (event_eth_rx_adapter == NULL) {
+ ret = rte_event_eth_rx_adapter_init();
+ if (ret)
+ return ret;
+ }
+
+ rx_adapter = id_to_rx_adapter(id);
+ if (rx_adapter != NULL) {
+ RTE_EDEV_LOG_ERR("Eth Rx adapter exists id = %" PRIu8, id);
+ return -EEXIST;
+ }
+
+ socket_id = rte_event_dev_socket_id(dev_id);
+ snprintf(mem_name, ETH_RX_ADAPTER_MEM_NAME_LEN,
+ "rte_event_eth_rx_adapter_%d",
+ id);
+
+ rx_adapter = rte_zmalloc_socket(mem_name, sizeof(*rx_adapter),
+ RTE_CACHE_LINE_SIZE, socket_id);
+ if (rx_adapter == NULL) {
+ RTE_EDEV_LOG_ERR("failed to get mem for rx adapter");
+ return -ENOMEM;
+ }
+
+ rx_adapter->eventdev_id = dev_id;
+ rx_adapter->socket_id = socket_id;
+ rx_adapter->conf_cb = conf_cb;
+ rx_adapter->conf_arg = conf_arg;
+ strcpy(rx_adapter->mem_name, mem_name);
+ rx_adapter->eth_devices = rte_zmalloc_socket(rx_adapter->mem_name,
+ rte_eth_dev_count() *
+ sizeof(struct eth_device_info), 0,
+ socket_id);
+ rte_convert_rss_key((const uint32_t *)default_rss_key,
+ (uint32_t *)rx_adapter->rss_key_be,
+ RTE_DIM(default_rss_key));
+
+ if (rx_adapter->eth_devices == NULL) {
+ RTE_EDEV_LOG_ERR("failed to get mem for eth devices");
+ rte_free(rx_adapter);
+ return -ENOMEM;
+ }
+ rte_spinlock_init(&rx_adapter->rx_lock);
+ for (i = 0; i < rte_eth_dev_count(); i++)
+ rx_adapter->eth_devices[i].dev = &rte_eth_devices[i];
+
+ event_eth_rx_adapter[id] = rx_adapter;
+ if (conf_cb == default_conf_cb)
+ rx_adapter->default_cb_arg = 1;
+ return 0;
+}
+
+int
+rte_event_eth_rx_adapter_create(uint8_t id, uint8_t dev_id,
+ struct rte_event_port_conf *port_config)
+{
+ struct rte_event_port_conf *pc;
+ int ret;
+
+ if (port_config == NULL)
+ return -EINVAL;
+ RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
+
+ pc = rte_malloc(NULL, sizeof(*pc), 0);
+ if (pc == NULL)
+ return -ENOMEM;
+ *pc = *port_config;
+ ret = rte_event_eth_rx_adapter_create_ext(id, dev_id,
+ default_conf_cb,
+ pc);
+ if (ret)
+ rte_free(pc);
+ return ret;
+}
+
+int
+rte_event_eth_rx_adapter_free(uint8_t id)
+{
+ struct rte_event_eth_rx_adapter *rx_adapter;
+
+ RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
+
+ rx_adapter = id_to_rx_adapter(id);
+ if (rx_adapter == NULL)
+ return -EINVAL;
+
+ if (rx_adapter->nb_queues) {
+ RTE_EDEV_LOG_ERR("%" PRIu32 " Rx queues not deleted",
+ rx_adapter->nb_queues);
+ return -EBUSY;
+ }
+
+ if (rx_adapter->default_cb_arg)
+ rte_free(rx_adapter->conf_arg);
+ rte_free(rx_adapter->eth_devices);
+ rte_free(rx_adapter);
+ event_eth_rx_adapter[id] = NULL;
+
+ return 0;
+}
+
+int
+rte_event_eth_rx_adapter_queue_add(uint8_t id,
+ uint8_t eth_dev_id,
+ int32_t rx_queue_id,
+ const struct rte_event_eth_rx_adapter_queue_conf *queue_conf)
+{
+ int ret;
+ uint32_t cap;
+ struct rte_event_eth_rx_adapter *rx_adapter;
+ struct rte_eventdev *dev;
+ struct eth_device_info *dev_info;
+ int start_service;
+
+ RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(eth_dev_id, -EINVAL);
+
+ rx_adapter = id_to_rx_adapter(id);
+ if ((rx_adapter == NULL) || (queue_conf == NULL))
+ return -EINVAL;
+
+ dev = &rte_eventdevs[rx_adapter->eventdev_id];
+ ret = rte_event_eth_rx_adapter_caps_get(rx_adapter->eventdev_id,
+ eth_dev_id,
+ &cap);
+ if (ret) {
+ RTE_EDEV_LOG_ERR("Failed to get adapter caps, adapter id %" PRIu8
+ " eth port %" PRIu8, id, eth_dev_id);
+ return ret;
+ }
+
+ if ((cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_OVERRIDE_FLOW_ID) == 0
+ && (queue_conf->rx_queue_flags &
+ RTE_EVENT_ETH_RX_ADAPTER_QUEUE_FLOW_ID_VALID)) {
+ RTE_EDEV_LOG_ERR("Flow ID override is not supported,"
+ " eth port: %" PRIu8 " adapter id: %" PRIu8,
+ eth_dev_id, id);
+ return -EINVAL;
+ }
+
+ if ((cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_MULTI_EVENTQ) == 0 &&
+ (rx_queue_id != -1)) {
+ RTE_EDEV_LOG_ERR("Rx queues can only be connected to a single "
+ "event queue, adapter id %u eth port %u", id, eth_dev_id);
+ return -EINVAL;
+ }
+
+ if (rx_queue_id != -1 && (uint16_t)rx_queue_id >=
+ rte_eth_devices[eth_dev_id].data->nb_rx_queues) {
+ RTE_EDEV_LOG_ERR("Invalid rx queue_id %" PRIu16,
+ (uint16_t)rx_queue_id);
+ return -EINVAL;
+ }
+
+ start_service = 0;
+ dev_info = &rx_adapter->eth_devices[eth_dev_id];
+
+ if (cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT) {
+ RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->eth_rx_adapter_queue_add,
+ -ENOTSUP);
+ if (dev_info->rx_queue == NULL) {
+ dev_info->rx_queue =
+ rte_zmalloc_socket(rx_adapter->mem_name,
+ dev_info->dev->data->nb_rx_queues *
+ sizeof(struct eth_rx_queue_info), 0,
+ rx_adapter->socket_id);
+ if (dev_info->rx_queue == NULL)
+ return -ENOMEM;
+ }
+
+ ret = (*dev->dev_ops->eth_rx_adapter_queue_add)(dev,
+ &rte_eth_devices[eth_dev_id],
+ rx_queue_id, queue_conf);
+ if (ret == 0) {
+ update_queue_info(rx_adapter,
+ &rx_adapter->eth_devices[eth_dev_id],
+ rx_queue_id,
+ 1);
+ }
+ } else {
+ rte_spinlock_lock(&rx_adapter->rx_lock);
+ ret = init_service(rx_adapter, id);
+ if (ret == 0)
+ ret = add_rx_queue(rx_adapter, eth_dev_id, rx_queue_id,
+ queue_conf);
+ rte_spinlock_unlock(&rx_adapter->rx_lock);
+ if (ret == 0)
+ start_service = !!sw_rx_adapter_queue_count(rx_adapter);
+ }
+
+ if (ret)
+ return ret;
+
+ if (start_service)
+ rte_service_component_runstate_set(rx_adapter->service_id, 1);
+
+ return 0;
+}
+
+int
+rte_event_eth_rx_adapter_queue_del(uint8_t id, uint8_t eth_dev_id,
+ int32_t rx_queue_id)
+{
+ int ret = 0;
+ struct rte_eventdev *dev;
+ struct rte_event_eth_rx_adapter *rx_adapter;
+ struct eth_device_info *dev_info;
+ uint32_t cap;
+ uint16_t i;
+
+ RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(eth_dev_id, -EINVAL);
+
+ rx_adapter = id_to_rx_adapter(id);
+ if (rx_adapter == NULL)
+ return -EINVAL;
+
+ dev = &rte_eventdevs[rx_adapter->eventdev_id];
+ ret = rte_event_eth_rx_adapter_caps_get(rx_adapter->eventdev_id,
+ eth_dev_id,
+ &cap);
+ if (ret)
+ return ret;
+
+ if (rx_queue_id != -1 && (uint16_t)rx_queue_id >=
+ rte_eth_devices[eth_dev_id].data->nb_rx_queues) {
+ RTE_EDEV_LOG_ERR("Invalid rx queue_id %" PRIu16,
+ (uint16_t)rx_queue_id);
+ return -EINVAL;
+ }
+
+ dev_info = &rx_adapter->eth_devices[eth_dev_id];
+
+ if (cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT) {
+ RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->eth_rx_adapter_queue_del,
+ -ENOTSUP);
+ ret = (*dev->dev_ops->eth_rx_adapter_queue_del)(dev,
+ &rte_eth_devices[eth_dev_id],
+ rx_queue_id);
+ if (ret == 0) {
+ update_queue_info(rx_adapter,
+ &rx_adapter->eth_devices[eth_dev_id],
+ rx_queue_id,
+ 0);
+ if (dev_info->nb_dev_queues == 0) {
+ rte_free(dev_info->rx_queue);
+ dev_info->rx_queue = NULL;
+ }
+ }
+ } else {
+ int rc;
+ rte_spinlock_lock(&rx_adapter->rx_lock);
+ if (rx_queue_id == -1) {
+ for (i = 0; i < dev_info->dev->data->nb_rx_queues; i++)
+ event_eth_rx_adapter_queue_del(rx_adapter,
+ dev_info,
+ i);
+ } else {
+ event_eth_rx_adapter_queue_del(rx_adapter,
+ dev_info,
+ (uint16_t)rx_queue_id);
+ }
+
+ rc = eth_poll_wrr_calc(rx_adapter);
+ if (rc)
+ RTE_EDEV_LOG_ERR("WRR recalculation failed %" PRId32,
+ rc);
+
+ if (dev_info->nb_dev_queues == 0) {
+ rte_free(dev_info->rx_queue);
+ dev_info->rx_queue = NULL;
+ }
+
+ rte_spinlock_unlock(&rx_adapter->rx_lock);
+ rte_service_component_runstate_set(rx_adapter->service_id,
+ sw_rx_adapter_queue_count(rx_adapter));
+ }
+
+ return ret;
+}
+
+
+int
+rte_event_eth_rx_adapter_start(uint8_t id)
+{
+ return rx_adapter_ctrl(id, 1);
+}
+
+int
+rte_event_eth_rx_adapter_stop(uint8_t id)
+{
+ return rx_adapter_ctrl(id, 0);
+}
+
+int
+rte_event_eth_rx_adapter_stats_get(uint8_t id,
+ struct rte_event_eth_rx_adapter_stats *stats)
+{
+ struct rte_event_eth_rx_adapter *rx_adapter;
+ struct rte_event_eth_rx_adapter_stats dev_stats_sum = { 0 };
+ struct rte_event_eth_rx_adapter_stats dev_stats;
+ struct rte_eventdev *dev;
+ struct eth_device_info *dev_info;
+ uint32_t i;
+ int ret;
+
+ RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
+
+ rx_adapter = id_to_rx_adapter(id);
+ if (rx_adapter == NULL || stats == NULL)
+ return -EINVAL;
+
+ dev = &rte_eventdevs[rx_adapter->eventdev_id];
+ memset(stats, 0, sizeof(*stats));
+ for (i = 0; i < rte_eth_dev_count(); i++) {
+ dev_info = &rx_adapter->eth_devices[i];
+ if (dev_info->internal_event_port == 0 ||
+ dev->dev_ops->eth_rx_adapter_stats_get == NULL)
+ continue;
+ ret = (*dev->dev_ops->eth_rx_adapter_stats_get)(dev,
+ &rte_eth_devices[i],
+ &dev_stats);
+ if (ret)
+ continue;
+ dev_stats_sum.rx_packets += dev_stats.rx_packets;
+ dev_stats_sum.rx_enq_count += dev_stats.rx_enq_count;
+ }
+
+ if (rx_adapter->service_inited)
+ *stats = rx_adapter->stats;
+
+ stats->rx_packets += dev_stats_sum.rx_packets;
+ stats->rx_enq_count += dev_stats_sum.rx_enq_count;
+ return 0;
+}
+
+int
+rte_event_eth_rx_adapter_stats_reset(uint8_t id)
+{
+ struct rte_event_eth_rx_adapter *rx_adapter;
+ struct rte_eventdev *dev;
+ struct eth_device_info *dev_info;
+ uint32_t i;
+
+ RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
+
+ rx_adapter = id_to_rx_adapter(id);
+ if (rx_adapter == NULL)
+ return -EINVAL;
+
+ dev = &rte_eventdevs[rx_adapter->eventdev_id];
+ for (i = 0; i < rte_eth_dev_count(); i++) {
+ dev_info = &rx_adapter->eth_devices[i];
+ if (dev_info->internal_event_port == 0 ||
+ dev->dev_ops->eth_rx_adapter_stats_reset == NULL)
+ continue;
+ (*dev->dev_ops->eth_rx_adapter_stats_reset)(dev,
+ &rte_eth_devices[i]);
+ }
+
+ memset(&rx_adapter->stats, 0, sizeof(rx_adapter->stats));
+ return 0;
+}
+
+int
+rte_event_eth_rx_adapter_service_id_get(uint8_t id, uint32_t *service_id)
+{
+ struct rte_event_eth_rx_adapter *rx_adapter;
+
+ RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
+
+ rx_adapter = id_to_rx_adapter(id);
+ if (rx_adapter == NULL || service_id == NULL)
+ return -EINVAL;
+
+ if (rx_adapter->service_inited)
+ *service_id = rx_adapter->service_id;
+
+ return rx_adapter->service_inited ? 0 : -ESRCH;
+}
diff --git a/lib/librte_eventdev/rte_event_eth_rx_adapter.h b/lib/librte_eventdev/rte_event_eth_rx_adapter.h
new file mode 100644
index 00000000..6a9e7edf
--- /dev/null
+++ b/lib/librte_eventdev/rte_event_eth_rx_adapter.h
@@ -0,0 +1,444 @@
+/*
+ * Copyright(c) 2017 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _RTE_EVENT_ETH_RX_ADAPTER_
+#define _RTE_EVENT_ETH_RX_ADAPTER_
+
+/**
+ * @file
+ *
+ * RTE Event Ethernet Rx Adapter
+ *
+ * An eventdev-based packet processing application enqueues/dequeues mbufs
+ * to/from the event device. Packet flow from the ethernet device to the event
+ * device can be accomplished using either HW or SW mechanisms depending on the
+ * platform and the particular combination of ethernet and event devices. The
+ * event ethernet Rx adapter provides common APIs to configure the packet flow
+ * from the ethernet devices to event devices across both these transfer
+ * mechanisms.
+ *
+ * The adapter uses an EAL service core function for SW based packet transfer
+ * and uses the eventdev PMD functions to configure HW based packet transfer
+ * between the ethernet device and the event device.
+ *
+ * The ethernet Rx event adapter's functions are:
+ * - rte_event_eth_rx_adapter_create_ext()
+ * - rte_event_eth_rx_adapter_create()
+ * - rte_event_eth_rx_adapter_free()
+ * - rte_event_eth_rx_adapter_queue_add()
+ * - rte_event_eth_rx_adapter_queue_del()
+ * - rte_event_eth_rx_adapter_start()
+ * - rte_event_eth_rx_adapter_stop()
+ * - rte_event_eth_rx_adapter_stats_get()
+ * - rte_event_eth_rx_adapter_stats_reset()
+ *
+ * The application creates an ethernet to event adapter using
+ * rte_event_eth_rx_adapter_create_ext() or rte_event_eth_rx_adapter_create()
+ * functions.
+ * The adapter needs to know which ethernet rx queues to poll for mbufs as well
+ * as event device parameters such as the event queue identifier, event
+ * priority and scheduling type that the adapter should use when constructing
+ * events. The rte_event_eth_rx_adapter_queue_add() function is provided for
+ * this purpose.
+ * The servicing weight parameter in the rte_event_eth_rx_adapter_queue_conf
+ * applies when the Rx adapter uses a service core function; it gives the
+ * application control over how frequently each ethernet device receive queue
+ * is polled. For example, the application may want to poll higher priority
+ * queues more frequently while not starving lower priority queues completely.
+ * If this parameter is zero and the receive interrupt is enabled when
+ * configuring the device, the receive queue is interrupt driven; else, the
+ * queue is assigned a servicing weight of one.
+ *
+ * The application can start/stop the adapter using the
+ * rte_event_eth_rx_adapter_start() and the rte_event_eth_rx_adapter_stop()
+ * functions. If the adapter uses a rte_service function, then the application
+ * is also required to assign a core to the service function and control the
+ * service core using the rte_service APIs. The
+ * rte_event_eth_rx_adapter_service_id_get() function can be used to retrieve
+ * the service function ID of the adapter in this case.
+ *
+ * Note: Interrupt driven receive queues are currently unimplemented.
+ */
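+
+/*
+ * A minimal usage sketch, not a definitive sequence: it assumes event device
+ * 0, ethernet port 0, adapter id 0, an application-initialized struct
+ * rte_event_port_conf (port_conf), and a service lcore (service_lcore) that
+ * has already been added and started via the rte_service APIs.
+ *
+ *	struct rte_event_eth_rx_adapter_queue_conf qconf;
+ *	uint32_t service_id;
+ *
+ *	memset(&qconf, 0, sizeof(qconf));
+ *	qconf.ev.queue_id = 0;
+ *	qconf.ev.sched_type = RTE_SCHED_TYPE_ATOMIC;
+ *	qconf.ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL;
+ *	qconf.servicing_weight = 1;
+ *
+ *	rte_event_eth_rx_adapter_create(0, 0, &port_conf);
+ *	rte_event_eth_rx_adapter_queue_add(0, 0, -1, &qconf);
+ *	if (rte_event_eth_rx_adapter_service_id_get(0, &service_id) == 0)
+ *		rte_service_map_lcore_set(service_id, service_lcore, 1);
+ *	rte_event_eth_rx_adapter_start(0);
+ */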
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <stdint.h>
+
+#include <rte_service.h>
+
+#include "rte_eventdev.h"
+
+#define RTE_EVENT_ETH_RX_ADAPTER_MAX_INSTANCE 32
+
+/* struct rte_event_eth_rx_adapter_queue_conf flags definitions */
+#define RTE_EVENT_ETH_RX_ADAPTER_QUEUE_FLOW_ID_VALID 0x1
+/**< This flag indicates the flow identifier is valid
+ * @see rte_event_eth_rx_adapter_queue_conf::rx_queue_flags
+ */
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice
+ *
+ * Adapter configuration structure that the adapter configuration callback
+ * function is expected to fill out
+ * @see rte_event_eth_rx_adapter_conf_cb
+ */
+struct rte_event_eth_rx_adapter_conf {
+ uint8_t event_port_id;
+ /**< Event port identifier, the adapter enqueues mbuf events to this
+ * port.
+ */
+ uint32_t max_nb_rx;
+ /**< The adapter can return early if it has processed at least
+ * max_nb_rx mbufs. This isn't treated as a requirement; batching may
+ * cause the adapter to process more than max_nb_rx mbufs.
+ */
+};
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice
+ *
+ * Function type used for the adapter configuration callback. The callback
+ * fills in members of struct rte_event_eth_rx_adapter_conf; it is invoked
+ * when creating a SW service for packet transfer from ethdev queues to the
+ * event device. The SW service is created within the
+ * rte_event_eth_rx_adapter_queue_add() function if SW based packet transfers
+ * from ethdev queues to the event device are required.
+ *
+ * @param id
+ * Adapter identifier.
+ *
+ * @param dev_id
+ * Event device identifier.
+ *
+ * @param [out] conf
+ * Structure that needs to be populated by this callback.
+ *
+ * @param arg
+ * Argument to the callback. This is the same as the conf_arg passed to the
+ * rte_event_eth_rx_adapter_create_ext().
+ */
+typedef int (*rte_event_eth_rx_adapter_conf_cb) (uint8_t id, uint8_t dev_id,
+ struct rte_event_eth_rx_adapter_conf *conf,
+ void *arg);
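+
+/*
+ * A minimal conf_cb sketch (illustrative only; APP_RX_PORT_ID is a
+ * hypothetical application-defined event port that has already been set up
+ * on the event device):
+ *
+ *	static int
+ *	app_rx_adapter_conf_cb(uint8_t id, uint8_t dev_id,
+ *			struct rte_event_eth_rx_adapter_conf *conf, void *arg)
+ *	{
+ *		RTE_SET_USED(id);
+ *		RTE_SET_USED(dev_id);
+ *		RTE_SET_USED(arg);
+ *		conf->event_port_id = APP_RX_PORT_ID;
+ *		conf->max_nb_rx = 128;
+ *		return 0;
+ *	}
+ */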
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice
+ *
+ * Rx queue configuration structure
+ */
+struct rte_event_eth_rx_adapter_queue_conf {
+ uint32_t rx_queue_flags;
+ /**< Flags for handling received packets
+ * @see RTE_EVENT_ETH_RX_ADAPTER_QUEUE_FLOW_ID_VALID
+ */
+ uint16_t servicing_weight;
+ /**< Relative polling frequency of the ethernet receive queue when the
+ * adapter uses a service core function for ethernet to event device
+ * transfers. If it is set to zero and Rx queue interrupts are enabled
+ * for the ethernet device, the Rx queue is interrupt driven; otherwise
+ * the queue is assigned a servicing weight of one.
+ */
+ struct rte_event ev;
+ /**<
+ * The values from the following event fields will be used when
+ * queuing mbuf events:
+ * - event_queue_id: Targeted event queue ID for received packets.
+ * - event_priority: Event priority of packets from this Rx queue in
+ * the event queue relative to other events.
+ * - sched_type: Scheduling type for packets from this Rx queue.
+ * - flow_id: If the RTE_EVENT_ETH_RX_ADAPTER_QUEUE_FLOW_ID_VALID bit
+ * is set in rx_queue_flags, this flow_id is used for all
+ * packets received from this queue. Otherwise the flow ID
+ * is set to the RSS hash of the src and dst IPv4/6
+ * addresses.
+ *
+ * The event adapter sets ev.event_type to
+ * RTE_EVENT_TYPE_ETH_RX_ADAPTER in the enqueued event.
+ */
+};
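+
+/*
+ * For example, to deliver all packets from a queue with a fixed flow id of 1
+ * instead of the RSS hash (an illustrative sketch building on the qconf from
+ * the earlier usage sketch; requires the
+ * RTE_EVENT_ETH_RX_ADAPTER_CAP_OVERRIDE_FLOW_ID capability):
+ *
+ *	qconf.rx_queue_flags = RTE_EVENT_ETH_RX_ADAPTER_QUEUE_FLOW_ID_VALID;
+ *	qconf.ev.flow_id = 1;
+ */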
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice
+ *
+ * A structure used to retrieve statistics for an eth rx adapter instance.
+ */
+struct rte_event_eth_rx_adapter_stats {
+ uint64_t rx_poll_count;
+ /**< Receive queue poll count */
+ uint64_t rx_packets;
+ /**< Received packet count */
+ uint64_t rx_enq_count;
+ /**< Eventdev enqueue count */
+ uint64_t rx_enq_retry;
+ /**< Eventdev enqueue retry count */
+ uint64_t rx_enq_start_ts;
+ /**< Rx enqueue start timestamp */
+ uint64_t rx_enq_block_cycles;
+ /**< Cycles for which the service is blocked by the event device,
+ * i.e, the service fails to enqueue to the event device.
+ */
+ uint64_t rx_enq_end_ts;
+ /**< Latest timestamp at which the service is unblocked
+ * by the event device. The start, end timestamps and
+ * block cycles can be used to compute the percentage of
+ * cycles the service is blocked by the event device.
+ */
+};
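+
+/*
+ * For example, the fraction of cycles for which the service was blocked by
+ * the event device can be estimated as follows (a sketch, with "s" holding
+ * values returned by rte_event_eth_rx_adapter_stats_get()):
+ *
+ *	double blocked = (double)s.rx_enq_block_cycles /
+ *			(s.rx_enq_end_ts - s.rx_enq_start_ts);
+ */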
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice
+ *
+ * Create a new ethernet Rx event adapter with the specified identifier.
+ *
+ * @param id
+ * The identifier of the ethernet Rx event adapter.
+ *
+ * @param dev_id
+ * The identifier of the device to configure.
+ *
+ * @param conf_cb
+ * Callback function that fills in members of a
+ * struct rte_event_eth_rx_adapter_conf struct passed into
+ * it.
+ *
+ * @param conf_arg
+ * Argument that is passed to the conf_cb function.
+ *
+ * @return
+ * - 0: Success
+ * - <0: Error code on failure
+ */
+int rte_event_eth_rx_adapter_create_ext(uint8_t id, uint8_t dev_id,
+ rte_event_eth_rx_adapter_conf_cb conf_cb,
+ void *conf_arg);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice
+ *
+ * Create a new ethernet Rx event adapter with the specified identifier.
+ * This function uses an internal configuration function that creates an event
+ * port. This default function reconfigures the event device with an
+ * additional event port and sets up the event port using the port_config
+ * parameter passed into this function. If the application needs more
+ * control over the configuration of the service, it should use
+ * rte_event_eth_rx_adapter_create_ext() instead.
+ *
+ * @param id
+ * The identifier of the ethernet Rx event adapter.
+ *
+ * @param dev_id
+ * The identifier of the device to configure.
+ *
+ * @param port_config
+ * Argument of type *rte_event_port_conf* that is passed to the conf_cb
+ * function.
+ *
+ * @return
+ * - 0: Success
+ * - <0: Error code on failure
+ */
+int rte_event_eth_rx_adapter_create(uint8_t id, uint8_t dev_id,
+ struct rte_event_port_conf *port_config);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice
+ *
+ * Free an event adapter
+ *
+ * @param id
+ * Adapter identifier.
+ *
+ * @return
+ * - 0: Success
+ *  - <0: Error code on failure; if the adapter still has Rx queues
+ * added to it, the function returns -EBUSY.
+ */
+int rte_event_eth_rx_adapter_free(uint8_t id);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice
+ *
+ * Add receive queue to an event adapter. After a queue has been
+ * added to the event adapter, the result of the application calling
+ * rte_eth_rx_burst(eth_dev_id, rx_queue_id, ..) is undefined.
+ *
+ * @param id
+ * Adapter identifier.
+ *
+ * @param eth_dev_id
+ * Port identifier of Ethernet device.
+ *
+ * @param rx_queue_id
+ * Ethernet device receive queue index.
+ * If rx_queue_id is -1, then all Rx queues configured for
+ * the device are added. If the ethdev Rx queues can only be
+ * connected to a single event queue then rx_queue_id is
+ * required to be -1.
+ * @see RTE_EVENT_ETH_RX_ADAPTER_CAP_MULTI_EVENTQ
+ *
+ * @param conf
+ *  Additional configuration structure of type
+ *  *rte_event_eth_rx_adapter_queue_conf*
+ *
+ * @return
+ * - 0: Success, Receive queue added correctly.
+ * - <0: Error code on failure.
+ */
+int rte_event_eth_rx_adapter_queue_add(uint8_t id,
+ uint8_t eth_dev_id,
+ int32_t rx_queue_id,
+ const struct rte_event_eth_rx_adapter_queue_conf *conf);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice
+ *
+ * Delete receive queue from an event adapter.
+ *
+ * @param id
+ * Adapter identifier.
+ *
+ * @param eth_dev_id
+ * Port identifier of Ethernet device.
+ *
+ * @param rx_queue_id
+ * Ethernet device receive queue index.
+ * If rx_queue_id is -1, then all Rx queues configured for
+ * the device are deleted. If the ethdev Rx queues can only be
+ * connected to a single event queue then rx_queue_id is
+ * required to be -1.
+ * @see RTE_EVENT_ETH_RX_ADAPTER_CAP_MULTI_EVENTQ
+ *
+ * @return
+ * - 0: Success, Receive queue deleted correctly.
+ * - <0: Error code on failure.
+ */
+int rte_event_eth_rx_adapter_queue_del(uint8_t id, uint8_t eth_dev_id,
+ int32_t rx_queue_id);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice
+ *
+ * Start ethernet Rx event adapter
+ *
+ * @param id
+ * Adapter identifier.
+ *
+ * @return
+ * - 0: Success, Adapter started correctly.
+ * - <0: Error code on failure.
+ */
+int rte_event_eth_rx_adapter_start(uint8_t id);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice
+ *
+ * Stop ethernet Rx event adapter
+ *
+ * @param id
+ * Adapter identifier.
+ *
+ * @return
+ *  - 0: Success, Adapter stopped correctly.
+ * - <0: Error code on failure.
+ */
+int rte_event_eth_rx_adapter_stop(uint8_t id);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice
+ *
+ * Retrieve statistics for an adapter
+ *
+ * @param id
+ * Adapter identifier.
+ *
+ * @param [out] stats
+ * A pointer to structure used to retrieve statistics for an adapter.
+ *
+ * @return
+ * - 0: Success, retrieved successfully.
+ * - <0: Error code on failure.
+ */
+int rte_event_eth_rx_adapter_stats_get(uint8_t id,
+ struct rte_event_eth_rx_adapter_stats *stats);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice
+ *
+ * Reset statistics for an adapter.
+ *
+ * @param id
+ * Adapter identifier.
+ *
+ * @return
+ * - 0: Success, statistics reset successfully.
+ * - <0: Error code on failure.
+ */
+int rte_event_eth_rx_adapter_stats_reset(uint8_t id);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice
+ *
+ * Retrieve the service ID of an adapter. If the adapter doesn't use
+ * a rte_service function, this function returns -ESRCH.
+ *
+ * @param id
+ * Adapter identifier.
+ *
+ * @param [out] service_id
+ * A pointer to a uint32_t, to be filled in with the service id.
+ *
+ * @return
+ * - 0: Success
+ * - <0: Error code on failure, if the adapter doesn't use a rte_service
+ * function, this function returns -ESRCH.
+ */
+int rte_event_eth_rx_adapter_service_id_get(uint8_t id, uint32_t *service_id);
+
+#ifdef __cplusplus
+}
+#endif
+#endif /* _RTE_EVENT_ETH_RX_ADAPTER_ */
diff --git a/lib/librte_eventdev/rte_eventdev.c b/lib/librte_eventdev/rte_eventdev.c
index bbb38050..ce6a5dc1 100644
--- a/lib/librte_eventdev/rte_eventdev.c
+++ b/lib/librte_eventdev/rte_eventdev.c
@@ -56,6 +56,7 @@
#include <rte_common.h>
#include <rte_malloc.h>
#include <rte_errno.h>
+#include <rte_ethdev.h>
#include "rte_eventdev.h"
#include "rte_eventdev_pmd.h"
@@ -128,55 +129,77 @@ rte_event_dev_info_get(uint8_t dev_id, struct rte_event_dev_info *dev_info)
return 0;
}
+int
+rte_event_eth_rx_adapter_caps_get(uint8_t dev_id, uint8_t eth_port_id,
+ uint32_t *caps)
+{
+ struct rte_eventdev *dev;
+
+ RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(eth_port_id, -EINVAL);
+
+ dev = &rte_eventdevs[dev_id];
+
+ if (caps == NULL)
+ return -EINVAL;
+ *caps = 0;
+
+ return dev->dev_ops->eth_rx_adapter_caps_get ?
+ (*dev->dev_ops->eth_rx_adapter_caps_get)(dev,
+ &rte_eth_devices[eth_port_id],
+ caps)
+ : 0;
+}
+
static inline int
rte_event_dev_queue_config(struct rte_eventdev *dev, uint8_t nb_queues)
{
uint8_t old_nb_queues = dev->data->nb_queues;
- uint8_t *queues_prio;
+ struct rte_event_queue_conf *queues_cfg;
unsigned int i;
RTE_EDEV_LOG_DEBUG("Setup %d queues on device %u", nb_queues,
dev->data->dev_id);
/* First time configuration */
- if (dev->data->queues_prio == NULL && nb_queues != 0) {
- /* Allocate memory to store queue priority */
- dev->data->queues_prio = rte_zmalloc_socket(
- "eventdev->data->queues_prio",
- sizeof(dev->data->queues_prio[0]) * nb_queues,
+ if (dev->data->queues_cfg == NULL && nb_queues != 0) {
+ /* Allocate memory to store queue configuration */
+ dev->data->queues_cfg = rte_zmalloc_socket(
+ "eventdev->data->queues_cfg",
+ sizeof(dev->data->queues_cfg[0]) * nb_queues,
RTE_CACHE_LINE_SIZE, dev->data->socket_id);
- if (dev->data->queues_prio == NULL) {
+ if (dev->data->queues_cfg == NULL) {
dev->data->nb_queues = 0;
- RTE_EDEV_LOG_ERR("failed to get mem for queue priority,"
+ RTE_EDEV_LOG_ERR("failed to get mem for queue cfg, "
"nb_queues %u", nb_queues);
return -(ENOMEM);
}
/* Re-configure */
- } else if (dev->data->queues_prio != NULL && nb_queues != 0) {
+ } else if (dev->data->queues_cfg != NULL && nb_queues != 0) {
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_release, -ENOTSUP);
for (i = nb_queues; i < old_nb_queues; i++)
(*dev->dev_ops->queue_release)(dev, i);
- /* Re allocate memory to store queue priority */
- queues_prio = dev->data->queues_prio;
- queues_prio = rte_realloc(queues_prio,
- sizeof(queues_prio[0]) * nb_queues,
+ /* Re allocate memory to store queue configuration */
+ queues_cfg = dev->data->queues_cfg;
+ queues_cfg = rte_realloc(queues_cfg,
+ sizeof(queues_cfg[0]) * nb_queues,
RTE_CACHE_LINE_SIZE);
- if (queues_prio == NULL) {
- RTE_EDEV_LOG_ERR("failed to realloc queue priority,"
+ if (queues_cfg == NULL) {
+ RTE_EDEV_LOG_ERR("failed to realloc queue cfg memory,"
" nb_queues %u", nb_queues);
return -(ENOMEM);
}
- dev->data->queues_prio = queues_prio;
+ dev->data->queues_cfg = queues_cfg;
if (nb_queues > old_nb_queues) {
uint8_t new_qs = nb_queues - old_nb_queues;
- memset(queues_prio + old_nb_queues, 0,
- sizeof(queues_prio[0]) * new_qs);
+ memset(queues_cfg + old_nb_queues, 0,
+ sizeof(queues_cfg[0]) * new_qs);
}
- } else if (dev->data->queues_prio != NULL && nb_queues == 0) {
+ } else if (dev->data->queues_cfg != NULL && nb_queues == 0) {
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_release, -ENOTSUP);
for (i = nb_queues; i < old_nb_queues; i++)
@@ -195,8 +218,7 @@ rte_event_dev_port_config(struct rte_eventdev *dev, uint8_t nb_ports)
uint8_t old_nb_ports = dev->data->nb_ports;
void **ports;
uint16_t *links_map;
- uint8_t *ports_dequeue_depth;
- uint8_t *ports_enqueue_depth;
+ struct rte_event_port_conf *ports_cfg;
unsigned int i;
RTE_EDEV_LOG_DEBUG("Setup %d ports on device %u", nb_ports,
@@ -214,26 +236,14 @@ rte_event_dev_port_config(struct rte_eventdev *dev, uint8_t nb_ports)
return -(ENOMEM);
}
- /* Allocate memory to store ports dequeue depth */
- dev->data->ports_dequeue_depth =
- rte_zmalloc_socket("eventdev->ports_dequeue_depth",
- sizeof(dev->data->ports_dequeue_depth[0]) * nb_ports,
+ /* Allocate memory to store port configurations */
+ dev->data->ports_cfg =
+ rte_zmalloc_socket("eventdev->ports_cfg",
+ sizeof(dev->data->ports_cfg[0]) * nb_ports,
RTE_CACHE_LINE_SIZE, dev->data->socket_id);
- if (dev->data->ports_dequeue_depth == NULL) {
+ if (dev->data->ports_cfg == NULL) {
dev->data->nb_ports = 0;
- RTE_EDEV_LOG_ERR("failed to get mem for port deq meta,"
- "nb_ports %u", nb_ports);
- return -(ENOMEM);
- }
-
- /* Allocate memory to store ports enqueue depth */
- dev->data->ports_enqueue_depth =
- rte_zmalloc_socket("eventdev->ports_enqueue_depth",
- sizeof(dev->data->ports_enqueue_depth[0]) * nb_ports,
- RTE_CACHE_LINE_SIZE, dev->data->socket_id);
- if (dev->data->ports_enqueue_depth == NULL) {
- dev->data->nb_ports = 0;
- RTE_EDEV_LOG_ERR("failed to get mem for port enq meta,"
+ RTE_EDEV_LOG_ERR("failed to get mem for port cfg, "
"nb_ports %u", nb_ports);
return -(ENOMEM);
}
@@ -257,8 +267,7 @@ rte_event_dev_port_config(struct rte_eventdev *dev, uint8_t nb_ports)
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->port_release, -ENOTSUP);
ports = dev->data->ports;
- ports_dequeue_depth = dev->data->ports_dequeue_depth;
- ports_enqueue_depth = dev->data->ports_enqueue_depth;
+ ports_cfg = dev->data->ports_cfg;
links_map = dev->data->links_map;
for (i = nb_ports; i < old_nb_ports; i++)
@@ -273,22 +282,12 @@ rte_event_dev_port_config(struct rte_eventdev *dev, uint8_t nb_ports)
return -(ENOMEM);
}
- /* Realloc memory for ports_dequeue_depth */
- ports_dequeue_depth = rte_realloc(ports_dequeue_depth,
- sizeof(ports_dequeue_depth[0]) * nb_ports,
+ /* Realloc memory for ports_cfg */
+ ports_cfg = rte_realloc(ports_cfg,
+ sizeof(ports_cfg[0]) * nb_ports,
RTE_CACHE_LINE_SIZE);
- if (ports_dequeue_depth == NULL) {
- RTE_EDEV_LOG_ERR("failed to realloc port dequeue meta,"
- " nb_ports %u", nb_ports);
- return -(ENOMEM);
- }
-
- /* Realloc memory for ports_enqueue_depth */
- ports_enqueue_depth = rte_realloc(ports_enqueue_depth,
- sizeof(ports_enqueue_depth[0]) * nb_ports,
- RTE_CACHE_LINE_SIZE);
- if (ports_enqueue_depth == NULL) {
- RTE_EDEV_LOG_ERR("failed to realloc port enqueue meta,"
+ if (ports_cfg == NULL) {
+ RTE_EDEV_LOG_ERR("failed to realloc port cfg mem,"
" nb_ports %u", nb_ports);
return -(ENOMEM);
}
@@ -314,18 +313,15 @@ rte_event_dev_port_config(struct rte_eventdev *dev, uint8_t nb_ports)
memset(ports + old_nb_ports, 0,
sizeof(ports[0]) * new_ps);
- memset(ports_dequeue_depth + old_nb_ports, 0,
- sizeof(ports_dequeue_depth[0]) * new_ps);
- memset(ports_enqueue_depth + old_nb_ports, 0,
- sizeof(ports_enqueue_depth[0]) * new_ps);
+ memset(ports_cfg + old_nb_ports, 0,
+ sizeof(ports_cfg[0]) * new_ps);
for (i = old_links_map_end; i < links_map_end; i++)
links_map[i] =
EVENT_QUEUE_SERVICE_PRIORITY_INVALID;
}
dev->data->ports = ports;
- dev->data->ports_dequeue_depth = ports_dequeue_depth;
- dev->data->ports_enqueue_depth = ports_enqueue_depth;
+ dev->data->ports_cfg = ports_cfg;
dev->data->links_map = links_map;
} else if (dev->data->ports != NULL && nb_ports == 0) {
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->port_release, -ENOTSUP);
@@ -519,13 +515,13 @@ rte_event_queue_default_conf_get(uint8_t dev_id, uint8_t queue_id,
static inline int
is_valid_atomic_queue_conf(const struct rte_event_queue_conf *queue_conf)
{
- if (queue_conf && (
- ((queue_conf->event_queue_cfg &
- RTE_EVENT_QUEUE_CFG_TYPE_MASK)
- == RTE_EVENT_QUEUE_CFG_ALL_TYPES) ||
+ if (queue_conf &&
+ !(queue_conf->event_queue_cfg &
+ RTE_EVENT_QUEUE_CFG_SINGLE_LINK) &&
((queue_conf->event_queue_cfg &
- RTE_EVENT_QUEUE_CFG_TYPE_MASK)
- == RTE_EVENT_QUEUE_CFG_ATOMIC_ONLY)
+ RTE_EVENT_QUEUE_CFG_ALL_TYPES) ||
+ (queue_conf->schedule_type
+ == RTE_SCHED_TYPE_ATOMIC)
))
return 1;
else
@@ -535,13 +531,13 @@ is_valid_atomic_queue_conf(const struct rte_event_queue_conf *queue_conf)
static inline int
is_valid_ordered_queue_conf(const struct rte_event_queue_conf *queue_conf)
{
- if (queue_conf && (
- ((queue_conf->event_queue_cfg &
- RTE_EVENT_QUEUE_CFG_TYPE_MASK)
- == RTE_EVENT_QUEUE_CFG_ALL_TYPES) ||
+ if (queue_conf &&
+ !(queue_conf->event_queue_cfg &
+ RTE_EVENT_QUEUE_CFG_SINGLE_LINK) &&
((queue_conf->event_queue_cfg &
- RTE_EVENT_QUEUE_CFG_TYPE_MASK)
- == RTE_EVENT_QUEUE_CFG_ORDERED_ONLY)
+ RTE_EVENT_QUEUE_CFG_ALL_TYPES) ||
+ (queue_conf->schedule_type
+ == RTE_SCHED_TYPE_ORDERED)
))
return 1;
else
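With the RTE_EVENT_QUEUE_CFG_*_ONLY flags gone, a single-type queue is now requested through the new schedule_type field, which is what the reworked checks above validate. A minimal sketch of a setup the new atomic check accepts, assuming dev_id/queue_id refer to an already configured device:

	#include <rte_eventdev.h>

	/* Sketch: request an atomic-only queue via schedule_type; the old
	 * RTE_EVENT_QUEUE_CFG_ATOMIC_ONLY flag no longer exists.
	 */
	static int
	setup_atomic_queue(uint8_t dev_id, uint8_t queue_id)
	{
		struct rte_event_queue_conf conf;
		int ret;

		ret = rte_event_queue_default_conf_get(dev_id, queue_id, &conf);
		if (ret < 0)
			return ret;

		conf.event_queue_cfg = 0; /* neither ALL_TYPES nor SINGLE_LINK */
		conf.schedule_type = RTE_SCHED_TYPE_ATOMIC;
		return rte_event_queue_setup(dev_id, queue_id, &conf);
	}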
@@ -605,31 +601,10 @@ rte_event_queue_setup(uint8_t dev_id, uint8_t queue_id,
queue_conf = &def_conf;
}
- dev->data->queues_prio[queue_id] = queue_conf->priority;
+ dev->data->queues_cfg[queue_id] = *queue_conf;
return (*dev->dev_ops->queue_setup)(dev, queue_id, queue_conf);
}
-uint8_t
-rte_event_queue_count(uint8_t dev_id)
-{
- struct rte_eventdev *dev;
-
- dev = &rte_eventdevs[dev_id];
- return dev->data->nb_queues;
-}
-
-uint8_t
-rte_event_queue_priority(uint8_t dev_id, uint8_t queue_id)
-{
- struct rte_eventdev *dev;
-
- dev = &rte_eventdevs[dev_id];
- if (dev->data->event_dev_cap & RTE_EVENT_DEV_CAP_QUEUE_QOS)
- return dev->data->queues_prio[queue_id];
- else
- return RTE_EVENT_DEV_PRIORITY_NORMAL;
-}
-
static inline int
is_valid_port(struct rte_eventdev *dev, uint8_t port_id)
{
@@ -726,10 +701,7 @@ rte_event_port_setup(uint8_t dev_id, uint8_t port_id,
port_conf = &def_conf;
}
- dev->data->ports_dequeue_depth[port_id] =
- port_conf->dequeue_depth;
- dev->data->ports_enqueue_depth[port_id] =
- port_conf->enqueue_depth;
+ dev->data->ports_cfg[port_id] = *port_conf;
diag = (*dev->dev_ops->port_setup)(dev, port_id, port_conf);
@@ -743,31 +715,110 @@ rte_event_port_setup(uint8_t dev_id, uint8_t port_id,
return 0;
}
-uint8_t
-rte_event_port_dequeue_depth(uint8_t dev_id, uint8_t port_id)
+int
+rte_event_dev_attr_get(uint8_t dev_id, uint32_t attr_id,
+ uint32_t *attr_value)
{
struct rte_eventdev *dev;
+ if (!attr_value)
+ return -EINVAL;
+ RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
dev = &rte_eventdevs[dev_id];
- return dev->data->ports_dequeue_depth[port_id];
+
+ switch (attr_id) {
+ case RTE_EVENT_DEV_ATTR_PORT_COUNT:
+ *attr_value = dev->data->nb_ports;
+ break;
+ case RTE_EVENT_DEV_ATTR_QUEUE_COUNT:
+ *attr_value = dev->data->nb_queues;
+ break;
+ case RTE_EVENT_DEV_ATTR_STARTED:
+ *attr_value = dev->data->dev_started;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
}
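The removed rte_event_port_count()/rte_event_queue_count() getters map onto this generic attribute API. A minimal caller sketch:

	#include <errno.h>
	#include <stdio.h>
	#include <rte_eventdev.h>

	static int
	print_eventdev_counts(uint8_t dev_id)
	{
		uint32_t nb_ports, nb_queues;

		/* replaces rte_event_port_count()/rte_event_queue_count() */
		if (rte_event_dev_attr_get(dev_id, RTE_EVENT_DEV_ATTR_PORT_COUNT,
				&nb_ports) < 0 ||
		    rte_event_dev_attr_get(dev_id, RTE_EVENT_DEV_ATTR_QUEUE_COUNT,
				&nb_queues) < 0)
			return -EINVAL;

		printf("eventdev %u: %u ports, %u queues\n", dev_id,
				nb_ports, nb_queues);
		return 0;
	}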
-uint8_t
-rte_event_port_enqueue_depth(uint8_t dev_id, uint8_t port_id)
+int
+rte_event_port_attr_get(uint8_t dev_id, uint8_t port_id, uint32_t attr_id,
+ uint32_t *attr_value)
{
struct rte_eventdev *dev;
+ if (!attr_value)
+ return -EINVAL;
+
+ RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
dev = &rte_eventdevs[dev_id];
- return dev->data->ports_enqueue_depth[port_id];
+ if (!is_valid_port(dev, port_id)) {
+ RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id);
+ return -EINVAL;
+ }
+
+ switch (attr_id) {
+ case RTE_EVENT_PORT_ATTR_ENQ_DEPTH:
+ *attr_value = dev->data->ports_cfg[port_id].enqueue_depth;
+ break;
+ case RTE_EVENT_PORT_ATTR_DEQ_DEPTH:
+ *attr_value = dev->data->ports_cfg[port_id].dequeue_depth;
+ break;
+ case RTE_EVENT_PORT_ATTR_NEW_EVENT_THRESHOLD:
+ *attr_value = dev->data->ports_cfg[port_id].new_event_threshold;
+ break;
+ default:
+ return -EINVAL;
+	}
+ return 0;
}
-uint8_t
-rte_event_port_count(uint8_t dev_id)
+int
+rte_event_queue_attr_get(uint8_t dev_id, uint8_t queue_id, uint32_t attr_id,
+ uint32_t *attr_value)
{
+ struct rte_event_queue_conf *conf;
struct rte_eventdev *dev;
+ if (!attr_value)
+ return -EINVAL;
+
+ RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
dev = &rte_eventdevs[dev_id];
- return dev->data->nb_ports;
+ if (!is_valid_queue(dev, queue_id)) {
+ RTE_EDEV_LOG_ERR("Invalid queue_id=%" PRIu8, queue_id);
+ return -EINVAL;
+ }
+
+ conf = &dev->data->queues_cfg[queue_id];
+
+ switch (attr_id) {
+ case RTE_EVENT_QUEUE_ATTR_PRIORITY:
+ *attr_value = RTE_EVENT_DEV_PRIORITY_NORMAL;
+ if (dev->data->event_dev_cap & RTE_EVENT_DEV_CAP_QUEUE_QOS)
+ *attr_value = conf->priority;
+ break;
+ case RTE_EVENT_QUEUE_ATTR_NB_ATOMIC_FLOWS:
+ *attr_value = conf->nb_atomic_flows;
+ break;
+ case RTE_EVENT_QUEUE_ATTR_NB_ATOMIC_ORDER_SEQUENCES:
+ *attr_value = conf->nb_atomic_order_sequences;
+ break;
+ case RTE_EVENT_QUEUE_ATTR_EVENT_QUEUE_CFG:
+ *attr_value = conf->event_queue_cfg;
+ break;
+ case RTE_EVENT_QUEUE_ATTR_SCHEDULE_TYPE:
+ if (conf->event_queue_cfg & RTE_EVENT_QUEUE_CFG_ALL_TYPES)
+ return -EOVERFLOW;
+
+ *attr_value = conf->schedule_type;
+ break;
+ default:
+ return -EINVAL;
+	}
+ return 0;
}
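Note the -EOVERFLOW path above: a queue created with RTE_EVENT_QUEUE_CFG_ALL_TYPES has no single schedule type to report. A usage sketch that handles it:

	#include <errno.h>
	#include <stdio.h>
	#include <rte_eventdev.h>

	static void
	print_queue_sched_type(uint8_t dev_id, uint8_t queue_id)
	{
		uint32_t sched_type;
		int ret;

		ret = rte_event_queue_attr_get(dev_id, queue_id,
				RTE_EVENT_QUEUE_ATTR_SCHEDULE_TYPE, &sched_type);
		if (ret == -EOVERFLOW)
			printf("queue %u accepts all schedule types\n", queue_id);
		else if (ret == 0)
			printf("queue %u schedule type: %u\n", queue_id,
					sched_type);
	}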
int
@@ -912,6 +963,23 @@ rte_event_dequeue_timeout_ticks(uint8_t dev_id, uint64_t ns,
}
int
+rte_event_dev_service_id_get(uint8_t dev_id, uint32_t *service_id)
+{
+ struct rte_eventdev *dev;
+
+ RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
+ dev = &rte_eventdevs[dev_id];
+
+ if (service_id == NULL)
+ return -EINVAL;
+
+ if (dev->data->service_inited)
+ *service_id = dev->data->service_id;
+
+ return dev->data->service_inited ? 0 : -ESRCH;
+}
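For a centralized device (RTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED not set) the returned service must be mapped to a service core before events flow. A sketch using the rte_service API, assuming lcore_id is a spare core:

	#include <errno.h>
	#include <rte_eventdev.h>
	#include <rte_service.h>

	static int
	start_scheduler_core(uint8_t dev_id, uint32_t lcore_id)
	{
		uint32_t service_id;
		int ret;

		ret = rte_event_dev_service_id_get(dev_id, &service_id);
		if (ret == -ESRCH)
			return 0; /* scheduling is distributed or done in HW */
		if (ret < 0)
			return ret;

		if (rte_service_lcore_add(lcore_id) < 0 ||
		    rte_service_map_lcore_set(service_id, lcore_id, 1) < 0 ||
		    rte_service_runstate_set(service_id, 1) < 0)
			return -1;
		return rte_service_lcore_start(lcore_id);
	}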
+
+int
rte_event_dev_dump(uint8_t dev_id, FILE *f)
{
struct rte_eventdev *dev;
diff --git a/lib/librte_eventdev/rte_eventdev.h b/lib/librte_eventdev/rte_eventdev.h
index 128bc522..f1949ff7 100644
--- a/lib/librte_eventdev/rte_eventdev.h
+++ b/lib/librte_eventdev/rte_eventdev.h
@@ -218,10 +218,10 @@
* (each worker thread schedules events to its own port) or centralized
* (a dedicated thread schedules to all ports). Distributed software schedulers
* perform the scheduling in rte_event_dequeue_burst(), whereas centralized
- * scheduler logic is located in rte_event_schedule().
+ * scheduler logic needs a dedicated service core for scheduling.
* The RTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED capability flag is not set
* indicates the device is centralized and thus needs a dedicated scheduling
- * thread that repeatedly calls rte_event_schedule().
+ * thread that repeatedly calls the software-specific scheduling function.
*
* An event driven worker thread has following typical workflow on fastpath:
* \code{.c}
@@ -263,16 +263,16 @@ struct rte_mbuf; /* we just use mbuf pointers; no need to include rte_mbuf.h */
* In distributed scheduling mode, event scheduling happens in HW or
* rte_event_dequeue_burst() or the combination of these two.
* If the flag is not set then eventdev is centralized and thus needs a
- * dedicated scheduling thread that repeatedly calls rte_event_schedule().
+ * dedicated service core that acts as the scheduling thread.
*
- * @see rte_event_schedule(), rte_event_dequeue_burst()
+ * @see rte_event_dequeue_burst()
*/
#define RTE_EVENT_DEV_CAP_QUEUE_ALL_TYPES (1ULL << 3)
/**< Event device is capable of enqueuing events of any type to any queue.
* If this capability is not set, the queue only supports events of the
- * *RTE_EVENT_QUEUE_CFG_* type that it was created with.
+ * *RTE_SCHED_TYPE_* type that it was created with.
*
- * @see RTE_EVENT_QUEUE_CFG_* values
+ * @see RTE_SCHED_TYPE_* values
*/
#define RTE_EVENT_DEV_CAP_BURST_MODE (1ULL << 4)
/**< Event device is capable of operating in burst mode for enqueue(forward,
@@ -399,6 +399,36 @@ struct rte_event_dev_info {
int
rte_event_dev_info_get(uint8_t dev_id, struct rte_event_dev_info *dev_info);
+/**
+ * The count of ports.
+ */
+#define RTE_EVENT_DEV_ATTR_PORT_COUNT 0
+/**
+ * The count of queues.
+ */
+#define RTE_EVENT_DEV_ATTR_QUEUE_COUNT 1
+/**
+ * The status of the device, zero for stopped, non-zero for started.
+ */
+#define RTE_EVENT_DEV_ATTR_STARTED 2
+
+/**
+ * Get an attribute from a device.
+ *
+ * @param dev_id Eventdev id
+ * @param attr_id The attribute ID to retrieve
+ * @param[out] attr_value A pointer that will be filled in with the attribute
+ * value if successful.
+ *
+ * @retval 0 Successfully retrieved attribute value
+ * -EINVAL Invalid device or *attr_id* provided, or *attr_value*
+ * is NULL
+ */
+int
+rte_event_dev_attr_get(uint8_t dev_id, uint32_t attr_id,
+ uint32_t *attr_value);
+
+
/* Event device configuration bitmap flags */
#define RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT (1ULL << 0)
/**< Override the global *dequeue_timeout_ns* and use per dequeue timeout in ns.
@@ -485,39 +515,13 @@ rte_event_dev_configure(uint8_t dev_id,
/* Event queue specific APIs */
/* Event queue configuration bitmap flags */
-#define RTE_EVENT_QUEUE_CFG_TYPE_MASK (3ULL << 0)
-/**< Mask for event queue schedule type configuration request */
-#define RTE_EVENT_QUEUE_CFG_ALL_TYPES (0ULL << 0)
+#define RTE_EVENT_QUEUE_CFG_ALL_TYPES (1ULL << 0)
/**< Allow ATOMIC,ORDERED,PARALLEL schedule type enqueue
*
* @see RTE_SCHED_TYPE_ORDERED, RTE_SCHED_TYPE_ATOMIC, RTE_SCHED_TYPE_PARALLEL
* @see rte_event_enqueue_burst()
*/
-#define RTE_EVENT_QUEUE_CFG_ATOMIC_ONLY (1ULL << 0)
-/**< Allow only ATOMIC schedule type enqueue
- *
- * The rte_event_enqueue_burst() result is undefined if the queue configured
- * with ATOMIC only and sched_type != RTE_SCHED_TYPE_ATOMIC
- *
- * @see RTE_SCHED_TYPE_ATOMIC, rte_event_enqueue_burst()
- */
-#define RTE_EVENT_QUEUE_CFG_ORDERED_ONLY (2ULL << 0)
-/**< Allow only ORDERED schedule type enqueue
- *
- * The rte_event_enqueue_burst() result is undefined if the queue configured
- * with ORDERED only and sched_type != RTE_SCHED_TYPE_ORDERED
- *
- * @see RTE_SCHED_TYPE_ORDERED, rte_event_enqueue_burst()
- */
-#define RTE_EVENT_QUEUE_CFG_PARALLEL_ONLY (3ULL << 0)
-/**< Allow only PARALLEL schedule type enqueue
- *
- * The rte_event_enqueue_burst() result is undefined if the queue configured
- * with PARALLEL only and sched_type != RTE_SCHED_TYPE_PARALLEL
- *
- * @see RTE_SCHED_TYPE_PARALLEL, rte_event_enqueue_burst()
- */
-#define RTE_EVENT_QUEUE_CFG_SINGLE_LINK (1ULL << 2)
+#define RTE_EVENT_QUEUE_CFG_SINGLE_LINK (1ULL << 1)
/**< This event queue links only to a single event port.
*
* @see rte_event_port_setup(), rte_event_port_link()
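An ALL_TYPES queue leaves the schedule type to each enqueued event, so the device must advertise RTE_EVENT_DEV_CAP_QUEUE_ALL_TYPES. A minimal sketch:

	#include <rte_eventdev.h>

	static int
	setup_all_types_queue(uint8_t dev_id, uint8_t queue_id)
	{
		struct rte_event_queue_conf conf;
		int ret;

		ret = rte_event_queue_default_conf_get(dev_id, queue_id, &conf);
		if (ret < 0)
			return ret;

		/* schedule_type is ignored; each event carries its own
		 * sched_type */
		conf.event_queue_cfg = RTE_EVENT_QUEUE_CFG_ALL_TYPES;
		return rte_event_queue_setup(dev_id, queue_id, &conf);
	}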
@@ -528,8 +532,8 @@ struct rte_event_queue_conf {
uint32_t nb_atomic_flows;
/**< The maximum number of active flows this queue can track at any
* given time. If the queue is configured for atomic scheduling (by
- * applying the RTE_EVENT_QUEUE_CFG_ALL_TYPES or
- * RTE_EVENT_QUEUE_CFG_ATOMIC_ONLY flags to event_queue_cfg), then the
+ * applying the RTE_EVENT_QUEUE_CFG_ALL_TYPES flag to event_queue_cfg
+ * or setting schedule_type to RTE_SCHED_TYPE_ATOMIC), then the
* value must be in the range of [1, nb_event_queue_flows], which was
* previously provided in rte_event_dev_configure().
*/
@@ -542,12 +546,18 @@ struct rte_event_queue_conf {
* event will be returned from dequeue until one or more entries are
* freed up/released.
* If the queue is configured for ordered scheduling (by applying the
- * RTE_EVENT_QUEUE_CFG_ALL_TYPES or RTE_EVENT_QUEUE_CFG_ORDERED_ONLY
- * flags to event_queue_cfg), then the value must be in the range of
- * [1, nb_event_queue_flows], which was previously supplied to
- * rte_event_dev_configure().
+ * RTE_EVENT_QUEUE_CFG_ALL_TYPES flag to event_queue_cfg or
+ * setting schedule_type to RTE_SCHED_TYPE_ORDERED), then the value must
+ * be in the range of [1, nb_event_queue_flows], which was
+ * previously supplied to rte_event_dev_configure().
+ */
+ uint32_t event_queue_cfg;
+ /**< Queue cfg flags (RTE_EVENT_QUEUE_CFG_*) */
+ uint8_t schedule_type;
+ /**< Queue schedule type (RTE_SCHED_TYPE_*).
+ * Valid when RTE_EVENT_QUEUE_CFG_ALL_TYPES bit is not set in
+ * event_queue_cfg.
*/
- uint32_t event_queue_cfg; /**< Queue cfg flags(EVENT_QUEUE_CFG_) */
uint8_t priority;
/**< Priority for this event queue relative to other event queues.
* The requested priority should in the range of
@@ -607,31 +617,45 @@ rte_event_queue_setup(uint8_t dev_id, uint8_t queue_id,
const struct rte_event_queue_conf *queue_conf);
/**
- * Get the number of event queues on a specific event device
- *
- * @param dev_id
- * Event device identifier.
- * @return
- * - The number of configured event queues
+ * The priority of the queue.
*/
-uint8_t
-rte_event_queue_count(uint8_t dev_id);
+#define RTE_EVENT_QUEUE_ATTR_PRIORITY 0
+/**
+ * The number of atomic flows configured for the queue.
+ */
+#define RTE_EVENT_QUEUE_ATTR_NB_ATOMIC_FLOWS 1
+/**
+ * The number of atomic order sequences configured for the queue.
+ */
+#define RTE_EVENT_QUEUE_ATTR_NB_ATOMIC_ORDER_SEQUENCES 2
+/**
+ * The configuration flags for the queue.
+ */
+#define RTE_EVENT_QUEUE_ATTR_EVENT_QUEUE_CFG 3
+/**
+ * The schedule type of the queue.
+ */
+#define RTE_EVENT_QUEUE_ATTR_SCHEDULE_TYPE 4
/**
- * Get the priority of the event queue on a specific event device
- *
- * @param dev_id
- * Event device identifier.
- * @param queue_id
- * Event queue identifier.
- * @return
- * - If the device has RTE_EVENT_DEV_CAP_QUEUE_QOS capability then the
- * configured priority of the event queue in
- * [RTE_EVENT_DEV_PRIORITY_HIGHEST, RTE_EVENT_DEV_PRIORITY_LOWEST] range
- * else the value RTE_EVENT_DEV_PRIORITY_NORMAL
+ * Get an attribute from a queue.
+ *
+ * @param dev_id Eventdev id
+ * @param queue_id Eventdev queue id
+ * @param attr_id The attribute ID to retrieve
+ * @param[out] attr_value A pointer that will be filled in with the attribute
+ * value if successful
+ *
+ * @retval 0 Successfully returned value
+ * -EINVAL Invalid device, queue or attr_id provided, or attr_value
+ * was NULL
+ * -EOVERFLOW returned when attr_id is set to
+ * RTE_EVENT_QUEUE_ATTR_SCHEDULE_TYPE and event_queue_cfg is set to
+ * RTE_EVENT_QUEUE_CFG_ALL_TYPES
*/
-uint8_t
-rte_event_queue_priority(uint8_t dev_id, uint8_t queue_id);
+int
+rte_event_queue_attr_get(uint8_t dev_id, uint8_t queue_id, uint32_t attr_id,
+ uint32_t *attr_value);
/* Event port specific APIs */
@@ -715,47 +739,33 @@ rte_event_port_setup(uint8_t dev_id, uint8_t port_id,
const struct rte_event_port_conf *port_conf);
/**
- * Get the number of dequeue queue depth configured for event port designated
- * by its *port_id* on a specific event device
- *
- * @param dev_id
- * Event device identifier.
- * @param port_id
- * Event port identifier.
- * @return
- * - The number of configured dequeue queue depth
- *
- * @see rte_event_dequeue_burst()
+ * The queue depth of the port on the enqueue side
*/
-uint8_t
-rte_event_port_dequeue_depth(uint8_t dev_id, uint8_t port_id);
-
+#define RTE_EVENT_PORT_ATTR_ENQ_DEPTH 0
/**
- * Get the number of enqueue queue depth configured for event port designated
- * by its *port_id* on a specific event device
- *
- * @param dev_id
- * Event device identifier.
- * @param port_id
- * Event port identifier.
- * @return
- * - The number of configured enqueue queue depth
- *
- * @see rte_event_enqueue_burst()
+ * The queue depth of the port on the dequeue side
*/
-uint8_t
-rte_event_port_enqueue_depth(uint8_t dev_id, uint8_t port_id);
+#define RTE_EVENT_PORT_ATTR_DEQ_DEPTH 1
+/**
+ * The new event threshold of the port
+ */
+#define RTE_EVENT_PORT_ATTR_NEW_EVENT_THRESHOLD 2
/**
- * Get the number of ports on a specific event device
+ * Get an attribute from a port.
*
- * @param dev_id
- * Event device identifier.
- * @return
- * - The number of configured ports
+ * @param dev_id Eventdev id
+ * @param port_id Eventdev port id
+ * @param attr_id The attribute ID to retrieve
+ * @param[out] attr_value A pointer that will be filled in with the attribute
+ * value if successful
+ *
+ * @retval 0 Successfully returned value
+ * -EINVAL Invalid device, port or attr_id, or attr_value was NULL
*/
-uint8_t
-rte_event_port_count(uint8_t dev_id);
+int
+rte_event_port_attr_get(uint8_t dev_id, uint8_t port_id, uint32_t attr_id,
+ uint32_t *attr_value);
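The removed per-field getters are folded into this single entry point. For example, sizing an enqueue burst from the configured depth, as a sketch assuming port_id is already set up:

	#include <rte_eventdev.h>

	static uint32_t
	port_burst_size(uint8_t dev_id, uint8_t port_id)
	{
		uint32_t enq_depth;

		/* replaces rte_event_port_enqueue_depth() */
		if (rte_event_port_attr_get(dev_id, port_id,
				RTE_EVENT_PORT_ATTR_ENQ_DEPTH, &enq_depth) < 0)
			return 1; /* conservative fallback */
		return enq_depth;
	}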
/**
* Start an event device.
@@ -871,6 +881,8 @@ rte_event_dev_close(uint8_t dev_id);
/**< The event generated from cpu for pipelining.
* Application may use *sub_event_type* to further classify the event
*/
+#define RTE_EVENT_TYPE_ETH_RX_ADAPTER 0x4
+/**< The event generated from event eth Rx adapter */
#define RTE_EVENT_TYPE_MAX 0x10
/**< Maximum number of event types */
@@ -882,7 +894,10 @@ rte_event_dev_close(uint8_t dev_id);
#define RTE_EVENT_OP_FORWARD 1
/**< The CPU use this operation to forward the event to different event queue or
* change to new application specific flow or schedule type to enable
- * pipelining
+ * pipelining.
+ *
+ * This operation must only be enqueued to the same port that the
+ * event to be forwarded was dequeued from.
*/
#define RTE_EVENT_OP_RELEASE 2
/**< Release the flow context associated with the schedule type.
@@ -912,6 +927,9 @@ rte_event_dev_close(uint8_t dev_id);
* or no scheduling context is held then this function may be an NOOP,
* depending on the implementation.
*
+ * This operation must only be enqueued to the same port that the
+ * event to be released was dequeued from.
+ *
*/
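The same-port rule applies to both FORWARD and RELEASE. A worker-stage sketch that forwards a dequeued event to a hypothetical next_queue through the port it came from:

	#include <rte_eventdev.h>

	static void
	forward_one(uint8_t dev_id, uint8_t port_id, uint8_t next_queue)
	{
		struct rte_event ev;

		if (rte_event_dequeue_burst(dev_id, port_id, &ev, 1, 0) == 0)
			return;

		/* ... process ev.mbuf ... */
		ev.queue_id = next_queue;
		ev.op = RTE_EVENT_OP_FORWARD;

		/* must go back out through the same port it was dequeued from */
		while (rte_event_enqueue_burst(dev_id, port_id, &ev, 1) == 0)
			;
	}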
/**
@@ -990,14 +1008,50 @@ struct rte_event {
};
};
+/* Ethdev Rx adapter capability bitmap flags */
+#define RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT 0x1
+/**< This flag is set when the packet transfer mechanism is in HW.
+ * Ethdev can send packets to the event device using internal event port.
+ */
+#define RTE_EVENT_ETH_RX_ADAPTER_CAP_MULTI_EVENTQ 0x2
+/**< Adapter supports multiple event queues per ethdev. Every ethdev
+ * Rx queue can be connected to a unique event queue.
+ */
+#define RTE_EVENT_ETH_RX_ADAPTER_CAP_OVERRIDE_FLOW_ID 0x4
+/**< The application can override the adapter generated flow ID in the
+ * event. This flow ID can be specified when adding an ethdev Rx queue
+ * to the adapter using the ev member of
+ * struct rte_event_eth_rx_adapter_queue_conf.
+ * @see struct rte_event_eth_rx_adapter_queue_conf::ev
+ * @see struct rte_event_eth_rx_adapter_queue_conf::rx_queue_flags
+ */
+
+/**
+ * Retrieve the event device's ethdev Rx adapter capabilities for the
+ * specified ethernet port
+ *
+ * @param dev_id
+ * The identifier of the device.
+ *
+ * @param eth_port_id
+ * The identifier of the ethernet device.
+ *
+ * @param[out] caps
+ * A pointer to memory filled with Rx event adapter capabilities.
+ *
+ * @return
+ * - 0: Success, driver provides Rx event adapter capabilities for the
+ * ethernet device.
+ * - <0: Error code returned by the driver function.
+ *
+ */
+int
+rte_event_eth_rx_adapter_caps_get(uint8_t dev_id, uint8_t eth_port_id,
+ uint32_t *caps);
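A caller can use the returned capabilities to decide whether the adapter will need a software service to move packets. A sketch:

	#include <rte_eventdev.h>
	#include <rte_event_eth_rx_adapter.h>

	/* Returns 1 if packet transfer needs the SW service, 0 if the ethdev
	 * has an internal event port, negative on error.
	 */
	static int
	rx_transfer_needs_service(uint8_t dev_id, uint8_t eth_port_id)
	{
		uint32_t caps = 0;
		int ret;

		ret = rte_event_eth_rx_adapter_caps_get(dev_id, eth_port_id,
				&caps);
		if (ret < 0)
			return ret;
		return !(caps & RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT);
	}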
struct rte_eventdev_driver;
struct rte_eventdev_ops;
struct rte_eventdev;
-typedef void (*event_schedule_t)(struct rte_eventdev *dev);
-/**< @internal Schedule one or more events in the event dev. */
-
typedef uint16_t (*event_enqueue_t)(void *port, const struct rte_event *ev);
/**< @internal Enqueue event on port of a device */
@@ -1034,12 +1088,10 @@ struct rte_eventdev_data {
/**< Number of event ports. */
void **ports;
/**< Array of pointers to ports. */
- uint8_t *ports_dequeue_depth;
- /**< Array of port dequeue depth. */
- uint8_t *ports_enqueue_depth;
- /**< Array of port enqueue depth. */
- uint8_t *queues_prio;
- /**< Array of queue priority. */
+ struct rte_event_port_conf *ports_cfg;
+ /**< Array of port configuration structures. */
+ struct rte_event_queue_conf *queues_cfg;
+ /**< Array of queue configuration structures. */
uint16_t *links_map;
/**< Memory to store queues to port connections. */
void *dev_private;
@@ -1048,6 +1100,10 @@ struct rte_eventdev_data {
/**< Event device capabilities(RTE_EVENT_DEV_CAP_)*/
struct rte_event_dev_config dev_conf;
/**< Configuration applied to device. */
+ uint8_t service_inited;
+ /**< Service initialization state */
+ uint32_t service_id;
+ /**< Service ID */
RTE_STD_C11
uint8_t dev_started : 1;
@@ -1059,8 +1115,6 @@ struct rte_eventdev_data {
/** @internal The data structure associated with each event device. */
struct rte_eventdev {
- event_schedule_t schedule;
- /**< Pointer to PMD schedule function. */
event_enqueue_t enqueue;
/**< Pointer to PMD enqueue function. */
event_enqueue_burst_t enqueue_burst;
@@ -1089,24 +1143,6 @@ struct rte_eventdev {
extern struct rte_eventdev *rte_eventdevs;
/** @internal The pool of rte_eventdev structures. */
-
-/**
- * Schedule one or more events in the event dev.
- *
- * An event dev implementation may define this is a NOOP, for instance if
- * the event dev performs its scheduling in hardware.
- *
- * @param dev_id
- * The identifier of the device.
- */
-static inline void
-rte_event_schedule(uint8_t dev_id)
-{
- struct rte_eventdev *dev = &rte_eventdevs[dev_id];
- if (*dev->schedule)
- (*dev->schedule)(dev);
-}
-
static __rte_always_inline uint16_t
__rte_event_enqueue_burst(uint8_t dev_id, uint8_t port_id,
const struct rte_event ev[], uint16_t nb_events,
@@ -1144,6 +1180,9 @@ __rte_event_enqueue_burst(uint8_t dev_id, uint8_t port_id,
* The *nb_events* parameter is the number of event objects to enqueue which are
* supplied in the *ev* array of *rte_event* structure.
*
+ * Event operations RTE_EVENT_OP_FORWARD and RTE_EVENT_OP_RELEASE must only be
+ * enqueued to the same port that their associated events were dequeued from.
+ *
* The rte_event_enqueue_burst() function returns the number of
* events objects it actually enqueued. A return value equal to *nb_events*
* means that all event objects have been enqueued.
@@ -1346,6 +1385,9 @@ rte_event_dequeue_timeout_ticks(uint8_t dev_id, uint64_t ns,
* with RTE_EVENT_OP_RELEASE operation can be used to release the
* contexts early.
*
+ * Event operations RTE_EVENT_OP_FORWARD and RTE_EVENT_OP_RELEASE must only be
+ * enqueued to the same port that their associated events were dequeued from.
+ *
* @param dev_id
* The identifier of the device.
* @param port_id
@@ -1545,6 +1587,24 @@ rte_event_port_links_get(uint8_t dev_id, uint8_t port_id,
uint8_t queues[], uint8_t priorities[]);
/**
+ * Retrieve the service ID of the event dev. If the event dev doesn't use
+ * an rte_service function, this function returns -ESRCH.
+ *
+ * @param dev_id
+ * The identifier of the device.
+ *
+ * @param[out] service_id
+ * A pointer to a uint32_t, to be filled in with the service id.
+ *
+ * @return
+ * - 0: Success
+ * - <0: Error code on failure; -ESRCH if the event dev doesn't use
+ * an rte_service function.
+ */
+int
+rte_event_dev_service_id_get(uint8_t dev_id, uint32_t *service_id);
+
+/**
* Dump internal information about *dev_id* to the FILE* provided in *f*.
*
* @param dev_id
diff --git a/lib/librte_eventdev/rte_eventdev_pmd.h b/lib/librte_eventdev/rte_eventdev_pmd.h
index 3d72acf3..7a206c56 100644
--- a/lib/librte_eventdev/rte_eventdev_pmd.h
+++ b/lib/librte_eventdev/rte_eventdev_pmd.h
@@ -83,9 +83,19 @@ extern "C" {
} \
} while (0)
+#define RTE_EVENT_ETH_RX_ADAPTER_SW_CAP \
+ ((RTE_EVENT_ETH_RX_ADAPTER_CAP_OVERRIDE_FLOW_ID) | \
+ (RTE_EVENT_ETH_RX_ADAPTER_CAP_MULTI_EVENTQ))
+
+/**< Ethernet Rx adapter cap to return if the packet transfers from
+ * the ethdev to the eventdev use a SW service function
+ */
+
#define RTE_EVENTDEV_DETACHED (0)
#define RTE_EVENTDEV_ATTACHED (1)
+struct rte_eth_dev;
+
/** Global structure used for maintaining state of allocated event devices */
struct rte_eventdev_global {
uint8_t nb_devs; /**< Number of devices found */
@@ -429,6 +439,163 @@ typedef int (*eventdev_xstats_get_names_t)(const struct rte_eventdev *dev,
typedef uint64_t (*eventdev_xstats_get_by_name)(const struct rte_eventdev *dev,
const char *name, unsigned int *id);
+
+/**
+ * Retrieve the event device's ethdev Rx adapter capabilities for the
+ * specified ethernet port
+ *
+ * @param dev
+ * Event device pointer
+ *
+ * @param eth_dev
+ * Ethernet device pointer
+ *
+ * @param[out] caps
+ * A pointer to memory filled with Rx event adapter capabilities.
+ *
+ * @return
+ * - 0: Success, driver provides Rx event adapter capabilities for the
+ * ethernet device.
+ * - <0: Error code returned by the driver function.
+ *
+ */
+typedef int (*eventdev_eth_rx_adapter_caps_get_t)
+ (const struct rte_eventdev *dev,
+ const struct rte_eth_dev *eth_dev,
+ uint32_t *caps);
+
+struct rte_event_eth_rx_adapter_queue_conf;
+
+/**
+ * Add ethernet Rx queues to event device. This callback is invoked if
+ * the caps returned from eventdev_eth_rx_adapter_caps_get() for eth_port_id
+ * have RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT set.
+ *
+ * @param dev
+ * Event device pointer
+ *
+ * @param eth_dev
+ * Ethernet device pointer
+ *
+ * @param rx_queue_id
+ * Ethernet device receive queue index
+ *
+ * @param queue_conf
+ * Additional configuration structure
+ *
+ * @return
+ * - 0: Success, ethernet receive queue added successfully.
+ * - <0: Error code returned by the driver function.
+ *
+ */
+typedef int (*eventdev_eth_rx_adapter_queue_add_t)(
+ const struct rte_eventdev *dev,
+ const struct rte_eth_dev *eth_dev,
+ int32_t rx_queue_id,
+ const struct rte_event_eth_rx_adapter_queue_conf *queue_conf);
+
+/**
+ * Delete ethernet Rx queues from event device. This callback is invoked if
+ * the caps returned from eventdev_eth_rx_adapter_caps_get() for eth_port_id
+ * have RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT set.
+ *
+ * @param dev
+ * Event device pointer
+ *
+ * @param eth_dev
+ * Ethernet device pointer
+ *
+ * @param rx_queue_id
+ * Ethernet device receive queue index
+ *
+ * @return
+ * - 0: Success, ethernet receive queue deleted successfully.
+ * - <0: Error code returned by the driver function.
+ *
+ */
+typedef int (*eventdev_eth_rx_adapter_queue_del_t)
+ (const struct rte_eventdev *dev,
+ const struct rte_eth_dev *eth_dev,
+ int32_t rx_queue_id);
+
+/**
+ * Start ethernet Rx adapter. This callback is invoked if
+ * the caps returned from eventdev_eth_rx_adapter_caps_get() for eth_port_id
+ * have RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT set and Rx queues
+ * from eth_port_id have been added to the event device.
+ *
+ * @param dev
+ * Event device pointer
+ *
+ * @param eth_dev
+ * Ethernet device pointer
+ *
+ * @return
+ * - 0: Success, ethernet Rx adapter started successfully.
+ * - <0: Error code returned by the driver function.
+ */
+typedef int (*eventdev_eth_rx_adapter_start_t)
+ (const struct rte_eventdev *dev,
+ const struct rte_eth_dev *eth_dev);
+
+/**
+ * Stop ethernet Rx adapter. This callback is invoked if
+ * the caps returned from eventdev_eth_rx_adapter_caps_get() for eth_port_id
+ * have RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT set and Rx queues
+ * from eth_port_id have been added to the event device.
+ *
+ * @param dev
+ * Event device pointer
+ *
+ * @param eth_dev
+ * Ethernet device pointer
+ *
+ * @return
+ * - 0: Success, ethernet Rx adapter stopped successfully.
+ * - <0: Error code returned by the driver function.
+ */
+typedef int (*eventdev_eth_rx_adapter_stop_t)
+ (const struct rte_eventdev *dev,
+ const struct rte_eth_dev *eth_dev);
+
+struct rte_event_eth_rx_adapter_stats;
+
+/**
+ * Retrieve ethernet Rx adapter statistics.
+ *
+ * @param dev
+ * Event device pointer
+ *
+ * @param eth_dev
+ * Ethernet device pointer
+ *
+ * @param[out] stats
+ * Pointer to stats structure
+ *
+ * @return
+ * Return 0 on success.
+ */
+typedef int (*eventdev_eth_rx_adapter_stats_get)
+ (const struct rte_eventdev *dev,
+ const struct rte_eth_dev *eth_dev,
+ struct rte_event_eth_rx_adapter_stats *stats);
+/**
+ * Reset ethernet Rx adapter statistics.
+ *
+ * @param dev
+ * Event device pointer
+ *
+ * @param eth_dev
+ * Ethernet device pointer
+ *
+ * @return
+ * Return 0 on success.
+ */
+typedef int (*eventdev_eth_rx_adapter_stats_reset)
+ (const struct rte_eventdev *dev,
+ const struct rte_eth_dev *eth_dev);
+
/** Event device operations function pointer table */
struct rte_eventdev_ops {
eventdev_info_get_t dev_infos_get; /**< Get device info. */
@@ -468,6 +635,21 @@ struct rte_eventdev_ops {
/**< Get one value by name. */
eventdev_xstats_reset_t xstats_reset;
/**< Reset the statistics values in xstats. */
+
+ eventdev_eth_rx_adapter_caps_get_t eth_rx_adapter_caps_get;
+ /**< Get ethernet Rx adapter capabilities */
+ eventdev_eth_rx_adapter_queue_add_t eth_rx_adapter_queue_add;
+ /**< Add Rx queues to ethernet Rx adapter */
+ eventdev_eth_rx_adapter_queue_del_t eth_rx_adapter_queue_del;
+ /**< Delete Rx queues from ethernet Rx adapter */
+ eventdev_eth_rx_adapter_start_t eth_rx_adapter_start;
+ /**< Start ethernet Rx adapter */
+ eventdev_eth_rx_adapter_stop_t eth_rx_adapter_stop;
+ /**< Stop ethernet Rx adapter */
+ eventdev_eth_rx_adapter_stats_get eth_rx_adapter_stats_get;
+ /**< Get ethernet Rx stats */
+ eventdev_eth_rx_adapter_stats_reset eth_rx_adapter_stats_reset;
+ /**< Reset ethernet Rx stats */
};
/**
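PMDs with an internal event port wire the new callbacks into the ops table above; SW-transfer PMDs can leave the queue add/del and start/stop hooks NULL and only report capabilities. A minimal, hypothetical caps_get callback (the function name is illustrative, not a real driver) for a PMD without an internal event port:

	#include <rte_common.h>
	#include "rte_eventdev_pmd.h"

	/* Hypothetical PMD callback: no internal event port, so report the
	 * common SW service capability set defined earlier in this header.
	 */
	static int
	my_pmd_eth_rx_adapter_caps_get(const struct rte_eventdev *dev,
				const struct rte_eth_dev *eth_dev,
				uint32_t *caps)
	{
		RTE_SET_USED(dev);
		RTE_SET_USED(eth_dev);
		*caps = RTE_EVENT_ETH_RX_ADAPTER_SW_CAP;
		return 0;
	}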
diff --git a/lib/librte_eventdev/rte_eventdev_pmd_pci.h b/lib/librte_eventdev/rte_eventdev_pmd_pci.h
index b6bd7319..ade32b5d 100644
--- a/lib/librte_eventdev/rte_eventdev_pmd_pci.h
+++ b/lib/librte_eventdev/rte_eventdev_pmd_pci.h
@@ -50,6 +50,7 @@ extern "C" {
#include <rte_eal.h>
#include <rte_lcore.h>
#include <rte_pci.h>
+#include <rte_bus_pci.h>
#include "rte_eventdev_pmd.h"
diff --git a/lib/librte_eventdev/rte_eventdev_pmd_vdev.h b/lib/librte_eventdev/rte_eventdev_pmd_vdev.h
index 135e8b80..56232dec 100644
--- a/lib/librte_eventdev/rte_eventdev_pmd_vdev.h
+++ b/lib/librte_eventdev/rte_eventdev_pmd_vdev.h
@@ -48,7 +48,7 @@ extern "C" {
#include <rte_debug.h>
#include <rte_eal.h>
-#include <rte_vdev.h>
+#include <rte_bus_vdev.h>
#include "rte_eventdev_pmd.h"
diff --git a/lib/librte_eventdev/rte_eventdev_version.map b/lib/librte_eventdev/rte_eventdev_version.map
index 4c48e5f0..108ae61f 100644
--- a/lib/librte_eventdev/rte_eventdev_version.map
+++ b/lib/librte_eventdev/rte_eventdev_version.map
@@ -19,17 +19,12 @@ DPDK_17.05 {
rte_event_port_default_conf_get;
rte_event_port_setup;
- rte_event_port_dequeue_depth;
- rte_event_port_enqueue_depth;
- rte_event_port_count;
rte_event_port_link;
rte_event_port_unlink;
rte_event_port_links_get;
rte_event_queue_default_conf_get;
rte_event_queue_setup;
- rte_event_queue_count;
- rte_event_queue_priority;
rte_event_dequeue_timeout_ticks;
@@ -51,3 +46,25 @@ DPDK_17.08 {
rte_event_ring_init;
rte_event_ring_lookup;
} DPDK_17.05;
+
+DPDK_17.11 {
+ global:
+
+ rte_event_dev_attr_get;
+ rte_event_dev_service_id_get;
+ rte_event_port_attr_get;
+ rte_event_queue_attr_get;
+
+ rte_event_eth_rx_adapter_caps_get;
+ rte_event_eth_rx_adapter_create;
+ rte_event_eth_rx_adapter_create_ext;
+ rte_event_eth_rx_adapter_free;
+ rte_event_eth_rx_adapter_queue_add;
+ rte_event_eth_rx_adapter_queue_del;
+ rte_event_eth_rx_adapter_service_id_get;
+ rte_event_eth_rx_adapter_start;
+ rte_event_eth_rx_adapter_stats_get;
+ rte_event_eth_rx_adapter_stats_reset;
+ rte_event_eth_rx_adapter_stop;
+
+} DPDK_17.08;