Diffstat (limited to 'lib/librte_eventdev')
-rw-r--r--  lib/librte_eventdev/Makefile                   10
-rw-r--r--  lib/librte_eventdev/rte_event_ring.c          207
-rw-r--r--  lib/librte_eventdev/rte_event_ring.h          308
-rw-r--r--  lib/librte_eventdev/rte_eventdev.c            169
-rw-r--r--  lib/librte_eventdev/rte_eventdev.h            217
-rw-r--r--  lib/librte_eventdev/rte_eventdev_pmd.h        105
-rw-r--r--  lib/librte_eventdev/rte_eventdev_pmd_pci.h    162
-rw-r--r--  lib/librte_eventdev/rte_eventdev_pmd_vdev.h   134
-rw-r--r--  lib/librte_eventdev/rte_eventdev_version.map    9
9 files changed, 1012 insertions, 309 deletions
diff --git a/lib/librte_eventdev/Makefile b/lib/librte_eventdev/Makefile
index e06346a6..410578a1 100644
--- a/lib/librte_eventdev/Makefile
+++ b/lib/librte_eventdev/Makefile
@@ -1,6 +1,6 @@
# BSD LICENSE
#
-# Copyright(c) 2016 Cavium networks. All rights reserved.
+# Copyright(c) 2016 Cavium, Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
@@ -12,7 +12,7 @@
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
-# * Neither the name of Cavium networks nor the names of its
+# * Neither the name of Cavium, Inc nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
@@ -34,7 +34,7 @@ include $(RTE_SDK)/mk/rte.vars.mk
LIB = librte_eventdev.a
# library version
-LIBABIVER := 1
+LIBABIVER := 2
# build flags
CFLAGS += -O3
@@ -42,10 +42,14 @@ CFLAGS += $(WERROR_FLAGS)
# library source files
SRCS-y += rte_eventdev.c
+SRCS-y += rte_event_ring.c
# export include files
SYMLINK-y-include += rte_eventdev.h
SYMLINK-y-include += rte_eventdev_pmd.h
+SYMLINK-y-include += rte_eventdev_pmd_pci.h
+SYMLINK-y-include += rte_eventdev_pmd_vdev.h
+SYMLINK-y-include += rte_event_ring.h
# versioning export map
EXPORT_MAP := rte_eventdev_version.map
diff --git a/lib/librte_eventdev/rte_event_ring.c b/lib/librte_eventdev/rte_event_ring.c
new file mode 100644
index 00000000..b14c2127
--- /dev/null
+++ b/lib/librte_eventdev/rte_event_ring.c
@@ -0,0 +1,207 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2017 Intel Corporation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <sys/queue.h>
+#include <string.h>
+
+#include <rte_tailq.h>
+#include <rte_memzone.h>
+#include <rte_rwlock.h>
+#include <rte_eal_memconfig.h>
+#include "rte_event_ring.h"
+
+TAILQ_HEAD(rte_event_ring_list, rte_tailq_entry);
+
+static struct rte_tailq_elem rte_event_ring_tailq = {
+ .name = RTE_TAILQ_EVENT_RING_NAME,
+};
+EAL_REGISTER_TAILQ(rte_event_ring_tailq)
+
+int
+rte_event_ring_init(struct rte_event_ring *r, const char *name,
+ unsigned int count, unsigned int flags)
+{
+ /* compilation-time checks */
+ RTE_BUILD_BUG_ON((sizeof(struct rte_event_ring) &
+ RTE_CACHE_LINE_MASK) != 0);
+
+ /* init the ring structure */
+ return rte_ring_init(&r->r, name, count, flags);
+}
+
+/* create the ring */
+struct rte_event_ring *
+rte_event_ring_create(const char *name, unsigned int count, int socket_id,
+ unsigned int flags)
+{
+ char mz_name[RTE_MEMZONE_NAMESIZE];
+ struct rte_event_ring *r;
+ struct rte_tailq_entry *te;
+ const struct rte_memzone *mz;
+ ssize_t ring_size;
+ int mz_flags = 0;
+ struct rte_event_ring_list *ring_list = NULL;
+ const unsigned int requested_count = count;
+ int ret;
+
+ ring_list = RTE_TAILQ_CAST(rte_event_ring_tailq.head,
+ rte_event_ring_list);
+
+ /* for an exact size ring, round up from count to a power of two */
+ if (flags & RING_F_EXACT_SZ)
+ count = rte_align32pow2(count + 1);
+ else if (!rte_is_power_of_2(count)) {
+ rte_errno = EINVAL;
+ return NULL;
+ }
+
+ ring_size = sizeof(*r) + (count * sizeof(struct rte_event));
+
+ ret = snprintf(mz_name, sizeof(mz_name), "%s%s",
+ RTE_RING_MZ_PREFIX, name);
+ if (ret < 0 || ret >= (int)sizeof(mz_name)) {
+ rte_errno = ENAMETOOLONG;
+ return NULL;
+ }
+
+ te = rte_zmalloc("RING_TAILQ_ENTRY", sizeof(*te), 0);
+ if (te == NULL) {
+ RTE_LOG(ERR, RING, "Cannot reserve memory for tailq\n");
+ rte_errno = ENOMEM;
+ return NULL;
+ }
+
+ rte_rwlock_write_lock(RTE_EAL_TAILQ_RWLOCK);
+
+ /*
+ * reserve a memory zone for this ring. If we can't get rte_config or
+ * we are a secondary process, the memzone_reserve function will set
+ * rte_errno for us appropriately - hence no check in this function
+ */
+ mz = rte_memzone_reserve(mz_name, ring_size, socket_id, mz_flags);
+ if (mz != NULL) {
+ r = mz->addr;
+ /*
+ * no need to check return value here, we already checked the
+ * arguments above
+ */
+ rte_event_ring_init(r, name, requested_count, flags);
+
+ te->data = (void *) r;
+ r->r.memzone = mz;
+
+ TAILQ_INSERT_TAIL(ring_list, te, next);
+ } else {
+ r = NULL;
+ RTE_LOG(ERR, RING, "Cannot reserve memory\n");
+ rte_free(te);
+ }
+ rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);
+
+ return r;
+}
+
+
+struct rte_event_ring *
+rte_event_ring_lookup(const char *name)
+{
+ struct rte_tailq_entry *te;
+ struct rte_event_ring *r = NULL;
+ struct rte_event_ring_list *ring_list;
+
+ ring_list = RTE_TAILQ_CAST(rte_event_ring_tailq.head,
+ rte_event_ring_list);
+
+ rte_rwlock_read_lock(RTE_EAL_TAILQ_RWLOCK);
+
+ TAILQ_FOREACH(te, ring_list, next) {
+ r = (struct rte_event_ring *) te->data;
+ if (strncmp(name, r->r.name, RTE_RING_NAMESIZE) == 0)
+ break;
+ }
+
+ rte_rwlock_read_unlock(RTE_EAL_TAILQ_RWLOCK);
+
+ if (te == NULL) {
+ rte_errno = ENOENT;
+ return NULL;
+ }
+
+ return r;
+}
+
+/* free the ring */
+void
+rte_event_ring_free(struct rte_event_ring *r)
+{
+ struct rte_event_ring_list *ring_list = NULL;
+ struct rte_tailq_entry *te;
+
+ if (r == NULL)
+ return;
+
+ /*
+ * If the ring was not created with rte_event_ring_create(),
+ * there is no memzone to free.
+ */
+ if (r->r.memzone == NULL) {
+ RTE_LOG(ERR, RING,
+ "Cannot free ring (not created with rte_event_ring_create()");
+ return;
+ }
+
+ if (rte_memzone_free(r->r.memzone) != 0) {
+ RTE_LOG(ERR, RING, "Cannot free memory\n");
+ return;
+ }
+
+ ring_list = RTE_TAILQ_CAST(rte_event_ring_tailq.head,
+ rte_event_ring_list);
+ rte_rwlock_write_lock(RTE_EAL_TAILQ_RWLOCK);
+
+ /* find the tailq entry */
+ TAILQ_FOREACH(te, ring_list, next) {
+ if (te->data == (void *) r)
+ break;
+ }
+
+ if (te == NULL) {
+ rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);
+ return;
+ }
+
+ TAILQ_REMOVE(ring_list, te, next);
+
+ rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);
+
+ rte_free(te);
+}
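
For reference, a short sketch of attaching to an existing ring from a
secondary process via the lookup path implemented above; the ring name
"worker_ring" is an illustrative assumption:

#include <errno.h>
#include <rte_errno.h>
#include <rte_event_ring.h>

static struct rte_event_ring *
attach_event_ring(void)
{
	/* resolves the name through the shared tailq of event rings */
	struct rte_event_ring *r = rte_event_ring_lookup("worker_ring");

	if (r == NULL && rte_errno == ENOENT)
		return NULL;	/* not yet created by the primary process */
	return r;
}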
diff --git a/lib/librte_eventdev/rte_event_ring.h b/lib/librte_eventdev/rte_event_ring.h
new file mode 100644
index 00000000..ea9b6885
--- /dev/null
+++ b/lib/librte_eventdev/rte_event_ring.h
@@ -0,0 +1,308 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2016-2017 Intel Corporation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/**
+ * @file
+ * RTE Event Ring
+ *
+ * This provides a ring implementation for passing rte_event structures
+ * from one core to another.
+ */
+
+#ifndef _RTE_EVENT_RING_
+#define _RTE_EVENT_RING_
+
+#include <stdint.h>
+
+#include <rte_common.h>
+#include <rte_memory.h>
+#include <rte_malloc.h>
+#include <rte_ring.h>
+#include "rte_eventdev.h"
+
+#define RTE_TAILQ_EVENT_RING_NAME "RTE_EVENT_RING"
+
+/**
+ * Generic ring structure for passing rte_event objects from core to core.
+ *
+ * Based on the primitives given in the rte_ring library. Designed to be
+ * used inside software eventdev implementations and by applications
+ * directly as needed.
+ */
+struct rte_event_ring {
+ struct rte_ring r;
+};
+
+/**
+ * Returns the number of events in the ring
+ *
+ * @param r
+ * pointer to the event ring
+ * @return
+ * the number of events in the ring
+ */
+static __rte_always_inline unsigned int
+rte_event_ring_count(const struct rte_event_ring *r)
+{
+ return rte_ring_count(&r->r);
+}
+
+/**
+ * Returns the amount of free space in the ring
+ *
+ * @param r
+ * pointer to the event ring
+ * @return
+ * the number of free slots in the ring, i.e. the number of events that
+ * can be successfully enqueued before dequeue must be called
+ */
+static __rte_always_inline unsigned int
+rte_event_ring_free_count(const struct rte_event_ring *r)
+{
+ return rte_ring_free_count(&r->r);
+}
+
+/**
+ * Enqueue a set of events onto a ring
+ *
+ * Note: this API enqueues by copying the events themselves onto the ring,
+ * rather than just placing a pointer to each event onto the ring. This
+ * means that statically-allocated events can safely be enqueued by this
+ * API.
+ *
+ * @param r
+ * pointer to the event ring
+ * @param events
+ * pointer to an array of struct rte_event objects
+ * @param n
+ * number of events in the array to enqueue
+ * @param free_space
+ * if non-null, is updated to indicate the amount of free space in the
+ * ring once the enqueue has completed.
+ * @return
+ * the number of elements, n', enqueued to the ring, 0 <= n' <= n
+ */
+static __rte_always_inline unsigned int
+rte_event_ring_enqueue_burst(struct rte_event_ring *r,
+ const struct rte_event *events,
+ unsigned int n, uint16_t *free_space)
+{
+ uint32_t prod_head, prod_next;
+ uint32_t free_entries;
+
+ n = __rte_ring_move_prod_head(&r->r, r->r.prod.single, n,
+ RTE_RING_QUEUE_VARIABLE,
+ &prod_head, &prod_next, &free_entries);
+ if (n == 0)
+ goto end;
+
+ ENQUEUE_PTRS(&r->r, &r[1], prod_head, events, n, struct rte_event);
+ rte_smp_wmb();
+
+ update_tail(&r->r.prod, prod_head, prod_next, 1);
+end:
+ if (free_space != NULL)
+ *free_space = free_entries - n;
+ return n;
+}
+
+/**
+ * Dequeue a set of events from a ring
+ *
+ * Note: this API does not work with pointers to events, rather it copies
+ * the events themselves to the destination ``events`` buffer.
+ *
+ * @param r
+ * pointer to the event ring
+ * @param events
+ * pointer to an array to hold the struct rte_event objects
+ * @param n
+ * number of events that can be held in the ``events`` array
+ * @param available
+ * if non-null, is updated to indicate the number of events remaining in
+ * the ring once the dequeue has completed
+ * @return
+ * the number of elements, n', dequeued from the ring, 0 <= n' <= n
+ */
+static __rte_always_inline unsigned int
+rte_event_ring_dequeue_burst(struct rte_event_ring *r,
+ struct rte_event *events,
+ unsigned int n, uint16_t *available)
+{
+ uint32_t cons_head, cons_next;
+ uint32_t entries;
+
+ n = __rte_ring_move_cons_head(&r->r, r->r.cons.single, n,
+ RTE_RING_QUEUE_VARIABLE,
+ &cons_head, &cons_next, &entries);
+ if (n == 0)
+ goto end;
+
+ DEQUEUE_PTRS(&r->r, &r[1], cons_head, events, n, struct rte_event);
+ rte_smp_rmb();
+
+ update_tail(&r->r.cons, cons_head, cons_next, 1);
+
+end:
+ if (available != NULL)
+ *available = entries - n;
+ return n;
+}
+
+/**
+ * Initializes an already-allocated ring structure
+ *
+ * @param r
+ * pointer to the ring memory to be initialized
+ * @param name
+ * name to be given to the ring
+ * @param count
+ * the number of elements to be stored in the ring. If the flag
+ * ``RING_F_EXACT_SZ`` is not set, this must be a power of 2, and the actual
+ * usable space in the ring will be ``count - 1`` entries. If the flag
+ * ``RING_F_EXACT_SZ`` is set, this can be any value up to the ring size
+ * limit - 1, and the usable space will be exactly that requested.
+ * @param flags
+ * An OR of the following:
+ * - RING_F_SP_ENQ: If this flag is set, the default behavior when
+ * using ``rte_ring_enqueue()`` or ``rte_ring_enqueue_bulk()``
+ * is "single-producer". Otherwise, it is "multi-producers".
+ * - RING_F_SC_DEQ: If this flag is set, the default behavior when
+ * using ``rte_ring_dequeue()`` or ``rte_ring_dequeue_bulk()``
+ * is "single-consumer". Otherwise, it is "multi-consumers".
+ * - RING_F_EXACT_SZ: If this flag is set, the ``count`` parameter is to
+ * be taken as the exact usable size of the ring, and as such does not
+ * need to be a power of 2. The underlying ring memory should be a
+ * power-of-2 size greater than the count value.
+ * @return
+ * 0 on success, or a negative value on error.
+ */
+int
+rte_event_ring_init(struct rte_event_ring *r, const char *name,
+ unsigned int count, unsigned int flags);
+
+/**
+ * Create an event ring structure
+ *
+ * This function allocates memory and initializes an event ring inside that
+ * memory.
+ *
+ * @param name
+ * name to be given to the ring
+ * @param count
+ * the number of elements to be stored in the ring. If the flag
+ * ``RING_F_EXACT_SZ`` is not set, this must be a power of 2, and the actual
+ * usable space in the ring will be ``count - 1`` entries. If the flag
+ * ``RING_F_EXACT_SZ`` is set, this can be any value up to the ring size
+ * limit - 1, and the usable space will be exactly that requested.
+ * @param socket_id
+ * The *socket_id* argument is the socket identifier in case of
+ * NUMA. The value can be *SOCKET_ID_ANY* if there is no NUMA
+ * constraint for the reserved zone.
+ * @param flags
+ * An OR of the following:
+ * - RING_F_SP_ENQ: If this flag is set, the default behavior when
+ * using ``rte_ring_enqueue()`` or ``rte_ring_enqueue_bulk()``
+ * is "single-producer". Otherwise, it is "multi-producers".
+ * - RING_F_SC_DEQ: If this flag is set, the default behavior when
+ * using ``rte_ring_dequeue()`` or ``rte_ring_dequeue_bulk()``
+ * is "single-consumer". Otherwise, it is "multi-consumers".
+ * - RING_F_EXACT_SZ: If this flag is set, the ``count`` parameter is to
+ * be taken as the exact usable size of the ring, and as such does not
+ * need to be a power of 2. The underlying ring memory should be a
+ * power-of-2 size greater than the count value.
+ * @return
+ * On success, the pointer to the new allocated ring. NULL on error with
+ * rte_errno set appropriately. Possible errno values include:
+ * - E_RTE_NO_CONFIG - function could not get pointer to rte_config structure
+ * - E_RTE_SECONDARY - function was called from a secondary process instance
+ * - EINVAL - count provided is not a power of 2
+ * - ENOSPC - the maximum number of memzones has already been allocated
+ * - EEXIST - a memzone with the same name already exists
+ * - ENOMEM - no appropriate memory area found in which to create memzone
+ */
+struct rte_event_ring *
+rte_event_ring_create(const char *name, unsigned int count, int socket_id,
+ unsigned int flags);
+
+/**
+ * Search for an event ring based on its name
+ *
+ * @param name
+ * The name of the ring.
+ * @return
+ * The pointer to the ring matching the name, or NULL if not found,
+ * with rte_errno set appropriately. Possible rte_errno values include:
+ * - ENOENT - required entry not available to return.
+ */
+struct rte_event_ring *
+rte_event_ring_lookup(const char *name);
+
+/**
+ * De-allocate all memory used by the ring.
+ *
+ * @param r
+ * Ring to free
+ */
+void
+rte_event_ring_free(struct rte_event_ring *r);
+
+/**
+ * Return the size of the event ring.
+ *
+ * @param r
+ * A pointer to the ring structure.
+ * @return
+ * The size of the data store used by the ring.
+ * NOTE: this is not the same as the usable space in the ring. To query that
+ * use ``rte_event_ring_get_capacity()``.
+ */
+static inline unsigned int
+rte_event_ring_get_size(const struct rte_event_ring *r)
+{
+ return rte_ring_get_size(&r->r);
+}
+
+/**
+ * Return the number of elements which can be stored in the event ring.
+ *
+ * @param r
+ * A pointer to the ring structure.
+ * @return
+ * The usable size of the ring.
+ */
+static inline unsigned int
+rte_event_ring_get_capacity(const struct rte_event_ring *r)
+{
+ return rte_ring_get_capacity(&r->r);
+}
+#endif
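
A minimal usage sketch of the event ring API added above; the ring name
"worker_ring", its size, and the burst size are illustrative assumptions,
and error handling is trimmed:

#include <string.h>
#include <rte_lcore.h>
#include <rte_event_ring.h>

static void
event_ring_example(void)
{
	struct rte_event ev[32];
	uint16_t free_space, available;
	unsigned int n;

	/* 1024 usable slots; RING_F_EXACT_SZ allows a non-power-of-2 count */
	struct rte_event_ring *r = rte_event_ring_create("worker_ring",
			1024, rte_socket_id(), RING_F_EXACT_SZ);
	if (r == NULL)
		return;

	memset(ev, 0, sizeof(ev));	/* stand-in for real events */

	/* events are copied into the ring, not stored by pointer */
	n = rte_event_ring_enqueue_burst(r, ev, RTE_DIM(ev), &free_space);

	/* retrieve up to as many events as were enqueued */
	n = rte_event_ring_dequeue_burst(r, ev, n, &available);

	rte_event_ring_free(r);
}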
diff --git a/lib/librte_eventdev/rte_eventdev.c b/lib/librte_eventdev/rte_eventdev.c
index 20afc3f0..bbb38050 100644
--- a/lib/librte_eventdev/rte_eventdev.c
+++ b/lib/librte_eventdev/rte_eventdev.c
@@ -1,7 +1,7 @@
/*
* BSD LICENSE
*
- * Copyright(c) 2016 Cavium networks. All rights reserved.
+ * Copyright(c) 2016 Cavium, Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -13,7 +13,7 @@
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
- * * Neither the name of Cavium networks nor the names of its
+ * * Neither the name of Cavium, Inc nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
@@ -45,7 +45,6 @@
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_dev.h>
-#include <rte_pci.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_memzone.h>
@@ -126,8 +125,6 @@ rte_event_dev_info_get(uint8_t dev_id, struct rte_event_dev_info *dev_info)
dev_info->dequeue_timeout_ns = dev->data->dev_conf.dequeue_timeout_ns;
dev_info->dev = dev->dev;
- if (dev->driver)
- dev_info->driver_name = dev->driver->pci_drv.driver.name;
return 0;
}
@@ -301,7 +298,7 @@ rte_event_dev_port_config(struct rte_eventdev *dev, uint8_t nb_ports)
sizeof(dev->data->links_map[0]) * nb_ports *
RTE_EVENT_MAX_QUEUES_PER_DEV,
RTE_CACHE_LINE_SIZE);
- if (dev->data->links_map == NULL) {
+ if (links_map == NULL) {
dev->data->nb_ports = 0;
RTE_EDEV_LOG_ERR("failed to realloc mem for port_map,"
"nb_ports %u", nb_ports);
@@ -369,9 +366,10 @@ rte_event_dev_configure(uint8_t dev_id,
/* Check dequeue_timeout_ns value is in limit */
if (!(dev_conf->event_dev_cfg & RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT)) {
- if (dev_conf->dequeue_timeout_ns < info.min_dequeue_timeout_ns
+ if (dev_conf->dequeue_timeout_ns &&
+ (dev_conf->dequeue_timeout_ns < info.min_dequeue_timeout_ns
|| dev_conf->dequeue_timeout_ns >
- info.max_dequeue_timeout_ns) {
+ info.max_dequeue_timeout_ns)) {
RTE_EDEV_LOG_ERR("dev%d invalid dequeue_timeout_ns=%d"
" min_dequeue_timeout_ns=%d max_dequeue_timeout_ns=%d",
dev_id, dev_conf->dequeue_timeout_ns,
@@ -429,8 +427,9 @@ rte_event_dev_configure(uint8_t dev_id,
dev_id);
return -EINVAL;
}
- if (dev_conf->nb_event_port_dequeue_depth >
- info.max_event_port_dequeue_depth) {
+ if ((info.event_dev_cap & RTE_EVENT_DEV_CAP_BURST_MODE) &&
+ (dev_conf->nb_event_port_dequeue_depth >
+ info.max_event_port_dequeue_depth)) {
RTE_EDEV_LOG_ERR("dev%d nb_dq_depth=%d > max_dq_depth=%d",
dev_id, dev_conf->nb_event_port_dequeue_depth,
info.max_event_port_dequeue_depth);
@@ -443,8 +442,9 @@ rte_event_dev_configure(uint8_t dev_id,
dev_id);
return -EINVAL;
}
- if (dev_conf->nb_event_port_enqueue_depth >
- info.max_event_port_enqueue_depth) {
+ if ((info.event_dev_cap & RTE_EVENT_DEV_CAP_BURST_MODE) &&
+ (dev_conf->nb_event_port_enqueue_depth >
+ info.max_event_port_enqueue_depth)) {
RTE_EDEV_LOG_ERR("dev%d nb_enq_depth=%d > max_enq_depth=%d",
dev_id, dev_conf->nb_event_port_enqueue_depth,
info.max_event_port_enqueue_depth);
@@ -1174,10 +1174,6 @@ rte_event_pmd_release(struct rte_eventdev *eventdev)
if (eventdev == NULL)
return -EINVAL;
- ret = rte_event_dev_close(eventdev->data->dev_id);
- if (ret < 0)
- return ret;
-
eventdev->attached = RTE_EVENTDEV_DETACHED;
eventdev_globals.nb_devs--;
@@ -1202,144 +1198,3 @@ rte_event_pmd_release(struct rte_eventdev *eventdev)
eventdev->data = NULL;
return 0;
}
-
-struct rte_eventdev *
-rte_event_pmd_vdev_init(const char *name, size_t dev_private_size,
- int socket_id)
-{
- struct rte_eventdev *eventdev;
-
- /* Allocate device structure */
- eventdev = rte_event_pmd_allocate(name, socket_id);
- if (eventdev == NULL)
- return NULL;
-
- /* Allocate private device structure */
- if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
- eventdev->data->dev_private =
- rte_zmalloc_socket("eventdev device private",
- dev_private_size,
- RTE_CACHE_LINE_SIZE,
- socket_id);
-
- if (eventdev->data->dev_private == NULL)
- rte_panic("Cannot allocate memzone for private device"
- " data");
- }
-
- return eventdev;
-}
-
-int
-rte_event_pmd_vdev_uninit(const char *name)
-{
- struct rte_eventdev *eventdev;
-
- if (name == NULL)
- return -EINVAL;
-
- eventdev = rte_event_pmd_get_named_dev(name);
- if (eventdev == NULL)
- return -ENODEV;
-
- /* Free the event device */
- rte_event_pmd_release(eventdev);
-
- return 0;
-}
-
-int
-rte_event_pmd_pci_probe(struct rte_pci_driver *pci_drv,
- struct rte_pci_device *pci_dev)
-{
- struct rte_eventdev_driver *eventdrv;
- struct rte_eventdev *eventdev;
-
- char eventdev_name[RTE_EVENTDEV_NAME_MAX_LEN];
-
- int retval;
-
- eventdrv = (struct rte_eventdev_driver *)pci_drv;
- if (eventdrv == NULL)
- return -ENODEV;
-
- rte_pci_device_name(&pci_dev->addr, eventdev_name,
- sizeof(eventdev_name));
-
- eventdev = rte_event_pmd_allocate(eventdev_name,
- pci_dev->device.numa_node);
- if (eventdev == NULL)
- return -ENOMEM;
-
- if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
- eventdev->data->dev_private =
- rte_zmalloc_socket(
- "eventdev private structure",
- eventdrv->dev_private_size,
- RTE_CACHE_LINE_SIZE,
- rte_socket_id());
-
- if (eventdev->data->dev_private == NULL)
- rte_panic("Cannot allocate memzone for private "
- "device data");
- }
-
- eventdev->dev = &pci_dev->device;
- eventdev->driver = eventdrv;
-
- /* Invoke PMD device initialization function */
- retval = (*eventdrv->eventdev_init)(eventdev);
- if (retval == 0)
- return 0;
-
- RTE_EDEV_LOG_ERR("driver %s: (vendor_id=0x%x device_id=0x%x)"
- " failed", pci_drv->driver.name,
- (unsigned int) pci_dev->id.vendor_id,
- (unsigned int) pci_dev->id.device_id);
-
- if (rte_eal_process_type() == RTE_PROC_PRIMARY)
- rte_free(eventdev->data->dev_private);
-
- eventdev->attached = RTE_EVENTDEV_DETACHED;
- eventdev_globals.nb_devs--;
-
- return -ENXIO;
-}
-
-int
-rte_event_pmd_pci_remove(struct rte_pci_device *pci_dev)
-{
- const struct rte_eventdev_driver *eventdrv;
- struct rte_eventdev *eventdev;
- char eventdev_name[RTE_EVENTDEV_NAME_MAX_LEN];
- int ret;
-
- if (pci_dev == NULL)
- return -EINVAL;
-
- rte_pci_device_name(&pci_dev->addr, eventdev_name,
- sizeof(eventdev_name));
-
- eventdev = rte_event_pmd_get_named_dev(eventdev_name);
- if (eventdev == NULL)
- return -ENODEV;
-
- eventdrv = (const struct rte_eventdev_driver *)pci_dev->driver;
- if (eventdrv == NULL)
- return -ENODEV;
-
- /* Invoke PMD device un-init function */
- if (*eventdrv->eventdev_uninit) {
- ret = (*eventdrv->eventdev_uninit)(eventdev);
- if (ret)
- return ret;
- }
-
- /* Free event device */
- rte_event_pmd_release(eventdev);
-
- eventdev->dev = NULL;
- eventdev->driver = NULL;
-
- return 0;
-}
diff --git a/lib/librte_eventdev/rte_eventdev.h b/lib/librte_eventdev/rte_eventdev.h
index 20e7293e..128bc522 100644
--- a/lib/librte_eventdev/rte_eventdev.h
+++ b/lib/librte_eventdev/rte_eventdev.h
@@ -1,7 +1,7 @@
/*
* BSD LICENSE
*
- * Copyright 2016 Cavium.
+ * Copyright 2016 Cavium, Inc.
* Copyright 2016 Intel Corporation.
* Copyright 2016 NXP.
*
@@ -15,7 +15,7 @@
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
- * * Neither the name of Cavium nor the names of its
+ * * Neither the name of Cavium, Inc nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
@@ -199,20 +199,6 @@
* operation. Instead, Event drivers export Poll-Mode enqueue and dequeue
* functions to applications.
*
- * An event driven based application has following typical workflow on fastpath:
- * \code{.c}
- * while (1) {
- *
- * rte_event_schedule(dev_id);
- *
- * rte_event_dequeue(...);
- *
- * (event processing)
- *
- * rte_event_enqueue(...);
- * }
- * \endcode
- *
* The events are injected to event device through *enqueue* operation by
* event producers in the system. The typical event producers are ethdev
* subsystem for generating packet events, CPU(SW) for generating events based
@@ -237,6 +223,15 @@
* indicates the device is centralized and thus needs a dedicated scheduling
* thread that repeatedly calls rte_event_schedule().
*
+ * An event-driven worker thread has the following typical workflow on the fastpath:
+ * \code{.c}
+ * while (1) {
+ * rte_event_dequeue_burst(...);
+ * (event processing)
+ * rte_event_enqueue_burst(...);
+ * }
+ * \endcode
+ *
*/
#ifdef __cplusplus
@@ -279,6 +274,14 @@ struct rte_mbuf; /* we just use mbuf pointers; no need to include rte_mbuf.h */
*
* @see RTE_EVENT_QUEUE_CFG_* values
*/
+#define RTE_EVENT_DEV_CAP_BURST_MODE (1ULL << 4)
+/**< Event device is capable of operating in burst mode for enqueue (forward,
+ * release) and dequeue operations. If this capability is not set, the
+ * application can still use rte_event_dequeue_burst() and
+ * rte_event_enqueue_burst(), but the PMD accepts only one event at a time.
+ *
+ * @see rte_event_dequeue_burst() rte_event_enqueue_burst()
+ */
/* Event device priority levels */
#define RTE_EVENT_DEV_PRIORITY_HIGHEST 0
@@ -409,6 +412,7 @@ struct rte_event_dev_config {
* This value should be in the range of *min_dequeue_timeout_ns* and
* *max_dequeue_timeout_ns* which previously provided in
* rte_event_dev_info_get()
+ * The value 0 is allowed, in which case the default dequeue timeout is used.
* @see RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT
*/
int32_t nb_events_limit;
@@ -438,14 +442,16 @@ struct rte_event_dev_config {
/**< Maximum number of events can be dequeued at a time from an
* event port by this device.
* This value cannot exceed the *max_event_port_dequeue_depth*
- * which previously provided in rte_event_dev_info_get()
+ * which was previously provided in rte_event_dev_info_get().
+ * Ignored when the device is not RTE_EVENT_DEV_CAP_BURST_MODE capable.
* @see rte_event_port_setup()
*/
uint32_t nb_event_port_enqueue_depth;
/**< Maximum number of events can be enqueued at a time from an
* event port by this device.
* This value cannot exceed the *max_event_port_enqueue_depth*
- * which previously provided in rte_event_dev_info_get()
+ * which was previously provided in rte_event_dev_info_get().
+ * Ignored when the device is not RTE_EVENT_DEV_CAP_BURST_MODE capable.
* @see rte_event_port_setup()
*/
uint32_t event_dev_cfg;
@@ -521,9 +527,11 @@ rte_event_dev_configure(uint8_t dev_id,
struct rte_event_queue_conf {
uint32_t nb_atomic_flows;
/**< The maximum number of active flows this queue can track at any
- * given time. The value must be in the range of
- * [1 - nb_event_queue_flows)] which previously provided in
- * rte_event_dev_info_get().
+ * given time. If the queue is configured for atomic scheduling (by
+ * applying the RTE_EVENT_QUEUE_CFG_ALL_TYPES or
+ * RTE_EVENT_QUEUE_CFG_ATOMIC_ONLY flags to event_queue_cfg), then the
+ * value must be in the range of [1, nb_event_queue_flows], which was
+ * previously provided in rte_event_dev_configure().
*/
uint32_t nb_atomic_order_sequences;
/**< The maximum number of outstanding events waiting to be
@@ -533,8 +541,11 @@ struct rte_event_queue_conf {
* scheduler cannot schedule the events from this queue and invalid
* event will be returned from dequeue until one or more entries are
* freed up/released.
- * The value must be in the range of [1 - nb_event_queue_flows)]
- * which previously supplied to rte_event_dev_configure().
+ * If the queue is configured for ordered scheduling (by applying the
+ * RTE_EVENT_QUEUE_CFG_ALL_TYPES or RTE_EVENT_QUEUE_CFG_ORDERED_ONLY
+ * flags to event_queue_cfg), then the value must be in the range of
+ * [1, nb_event_queue_flows], which was previously supplied to
+ * rte_event_dev_configure().
*/
uint32_t event_queue_cfg; /**< Queue cfg flags(EVENT_QUEUE_CFG_) */
uint8_t priority;
@@ -642,12 +653,14 @@ struct rte_event_port_conf {
uint16_t dequeue_depth;
/**< Configure number of bulk dequeues for this event port.
* This value cannot exceed the *nb_event_port_dequeue_depth*
- * which previously supplied to rte_event_dev_configure()
+ * which was previously supplied to rte_event_dev_configure().
+ * Ignored when the device is not RTE_EVENT_DEV_CAP_BURST_MODE capable.
*/
uint16_t enqueue_depth;
/**< Configure number of bulk enqueues for this event port.
* This value cannot exceed the *nb_event_port_enqueue_depth*
- * which previously supplied to rte_event_dev_configure()
+ * which was previously supplied to rte_event_dev_configure().
+ * Ignored when the device is not RTE_EVENT_DEV_CAP_BURST_MODE capable.
*/
};
@@ -1052,6 +1065,10 @@ struct rte_eventdev {
/**< Pointer to PMD enqueue function. */
event_enqueue_burst_t enqueue_burst;
/**< Pointer to PMD enqueue burst function. */
+ event_enqueue_burst_t enqueue_new_burst;
+ /**< Pointer to PMD enqueue burst function (op new variant). */
+ event_enqueue_burst_t enqueue_forward_burst;
+ /**< Pointer to PMD enqueue burst function (op forward variant). */
event_dequeue_t dequeue;
/**< Pointer to PMD dequeue function. */
event_dequeue_burst_t dequeue_burst;
@@ -1063,8 +1080,6 @@ struct rte_eventdev {
/**< Functions exported by PMD */
struct rte_device *dev;
/**< Device info. supplied by probing */
- const struct rte_eventdev_driver *driver;
- /**< Driver for this device */
RTE_STD_C11
uint8_t attached : 1;
@@ -1092,6 +1107,34 @@ rte_event_schedule(uint8_t dev_id)
(*dev->schedule)(dev);
}
+static __rte_always_inline uint16_t
+__rte_event_enqueue_burst(uint8_t dev_id, uint8_t port_id,
+ const struct rte_event ev[], uint16_t nb_events,
+ const event_enqueue_burst_t fn)
+{
+ const struct rte_eventdev *dev = &rte_eventdevs[dev_id];
+
+#ifdef RTE_LIBRTE_EVENTDEV_DEBUG
+ if (dev_id >= RTE_EVENT_MAX_DEVS || !rte_eventdevs[dev_id].attached) {
+ rte_errno = -EINVAL;
+ return 0;
+ }
+
+ if (port_id >= dev->data->nb_ports) {
+ rte_errno = -EINVAL;
+ return 0;
+ }
+#endif
+ /*
+ * Allow zero-cost non-burst mode routine invocation if the
+ * application requests nb_events as the constant 1
+ */
+ if (nb_events == 1)
+ return (*dev->enqueue)(dev->data->ports[port_id], ev);
+ else
+ return fn(dev->data->ports[port_id], ev, nb_events);
+}
+
/**
* Enqueue a burst of events objects or an event object supplied in *rte_event*
* structure on an event device designated by its *dev_id* through the event
@@ -1135,30 +1178,108 @@ static inline uint16_t
rte_event_enqueue_burst(uint8_t dev_id, uint8_t port_id,
const struct rte_event ev[], uint16_t nb_events)
{
- struct rte_eventdev *dev = &rte_eventdevs[dev_id];
+ const struct rte_eventdev *dev = &rte_eventdevs[dev_id];
-#ifdef RTE_LIBRTE_EVENTDEV_DEBUG
- if (dev_id >= RTE_EVENT_MAX_DEVS || !rte_eventdevs[dev_id].attached) {
- rte_errno = -EINVAL;
- return 0;
- }
+ return __rte_event_enqueue_burst(dev_id, port_id, ev, nb_events,
+ dev->enqueue_burst);
+}
- if (port_id >= dev->data->nb_ports) {
- rte_errno = -EINVAL;
- return 0;
- }
-#endif
+/**
+ * Enqueue a burst of events objects of operation type *RTE_EVENT_OP_NEW* on
+ * an event device designated by its *dev_id* through the event port specified
+ * by *port_id*.
+ *
+ * Provides the same functionality as rte_event_enqueue_burst(), except that
+ * the application can use this API when all objects in the burst contain
+ * the enqueue operation of the type *RTE_EVENT_OP_NEW*. This specialized
+ * function can provide an additional hint to the PMD and optimize if possible.
+ *
+ * The rte_event_enqueue_new_burst() result is undefined if the enqueue burst
+ * contains an event object of operation type != RTE_EVENT_OP_NEW.
+ *
+ * @param dev_id
+ * The identifier of the device.
+ * @param port_id
+ * The identifier of the event port.
+ * @param ev
+ * Points to an array of *nb_events* objects of type *rte_event* structure
+ * which contain the event object enqueue operations to be processed.
+ * @param nb_events
+ * The number of event objects to enqueue, typically number of
+ * rte_event_port_enqueue_depth() available for this port.
+ *
+ * @return
+ * The number of event objects actually enqueued on the event device. The
+ * return value can be less than the value of the *nb_events* parameter when
+ * the event device's queue is full or if invalid parameters are specified in a
+ * *rte_event*. If the return value is less than *nb_events*, the remaining
+ * events at the end of ev[] are not consumed and the caller has to take care
+ * of them, and rte_errno is set accordingly. Possible errno values include:
+ * - -EINVAL The port ID is invalid, device ID is invalid, an event's queue
+ * ID is invalid, or an event's sched type doesn't match the
+ * capabilities of the destination queue.
+ * - -ENOSPC The event port was backpressured and unable to enqueue
+ * one or more events. This error code is only applicable to
+ * closed systems.
+ * @see rte_event_port_enqueue_depth() rte_event_enqueue_burst()
+ */
+static inline uint16_t
+rte_event_enqueue_new_burst(uint8_t dev_id, uint8_t port_id,
+ const struct rte_event ev[], uint16_t nb_events)
+{
+ const struct rte_eventdev *dev = &rte_eventdevs[dev_id];
- /*
- * Allow zero cost non burst mode routine invocation if application
- * requests nb_events as const one
- */
- if (nb_events == 1)
- return (*dev->enqueue)(
- dev->data->ports[port_id], ev);
- else
- return (*dev->enqueue_burst)(
- dev->data->ports[port_id], ev, nb_events);
+ return __rte_event_enqueue_burst(dev_id, port_id, ev, nb_events,
+ dev->enqueue_new_burst);
+}
+
+/**
+ * Enqueue a burst of events objects of operation type *RTE_EVENT_OP_FORWARD*
+ * on an event device designated by its *dev_id* through the event port
+ * specified by *port_id*.
+ *
+ * Provides the same functionality as rte_event_enqueue_burst(), except that
+ * the application can use this API when all objects in the burst contain
+ * the enqueue operation of the type *RTE_EVENT_OP_FORWARD*. This specialized
+ * function can provide an additional hint to the PMD and optimize if possible.
+ *
+ * The rte_event_enqueue_forward_burst() result is undefined if the enqueue
+ * burst contains an event object of operation type != RTE_EVENT_OP_FORWARD.
+ *
+ * @param dev_id
+ * The identifier of the device.
+ * @param port_id
+ * The identifier of the event port.
+ * @param ev
+ * Points to an array of *nb_events* objects of type *rte_event* structure
+ * which contain the event object enqueue operations to be processed.
+ * @param nb_events
+ * The number of event objects to enqueue, typically number of
+ * rte_event_port_enqueue_depth() available for this port.
+ *
+ * @return
+ * The number of event objects actually enqueued on the event device. The
+ * return value can be less than the value of the *nb_events* parameter when
+ * the event device's queue is full or if invalid parameters are specified in a
+ * *rte_event*. If the return value is less than *nb_events*, the remaining
+ * events at the end of ev[] are not consumed and the caller has to take care
+ * of them, and rte_errno is set accordingly. Possible errno values include:
+ * - -EINVAL The port ID is invalid, device ID is invalid, an event's queue
+ * ID is invalid, or an event's sched type doesn't match the
+ * capabilities of the destination queue.
+ * - -ENOSPC The event port was backpressured and unable to enqueue
+ * one or more events. This error code is only applicable to
+ * closed systems.
+ * @see rte_event_port_enqueue_depth() rte_event_enqueue_burst()
+ */
+static inline uint16_t
+rte_event_enqueue_forward_burst(uint8_t dev_id, uint8_t port_id,
+ const struct rte_event ev[], uint16_t nb_events)
+{
+ const struct rte_eventdev *dev = &rte_eventdevs[dev_id];
+
+ return __rte_event_enqueue_burst(dev_id, port_id, ev, nb_events,
+ dev->enqueue_forward_burst);
}
/**
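
As a sketch of how the new specialized enqueue variants slot into the
worker-loop workflow described in the header above; dev_id, port_id and
the burst size are illustrative:

#include <rte_eventdev.h>

static void
worker_loop(uint8_t dev_id, uint8_t port_id)
{
	struct rte_event ev[16];
	uint16_t i, nb;

	while (1) {
		nb = rte_event_dequeue_burst(dev_id, port_id, ev,
				RTE_DIM(ev), 0);
		for (i = 0; i < nb; i++) {
			/* process ev[i], then mark it for re-injection */
			ev[i].op = RTE_EVENT_OP_FORWARD;
		}
		/* every event carries RTE_EVENT_OP_FORWARD, so the
		 * specialized variant can pass that hint to the PMD */
		if (nb > 0)
			rte_event_enqueue_forward_burst(dev_id, port_id,
					ev, nb);
	}
}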
diff --git a/lib/librte_eventdev/rte_eventdev_pmd.h b/lib/librte_eventdev/rte_eventdev_pmd.h
index 4005b3c9..3d72acf3 100644
--- a/lib/librte_eventdev/rte_eventdev_pmd.h
+++ b/lib/librte_eventdev/rte_eventdev_pmd.h
@@ -1,6 +1,6 @@
/*
*
- * Copyright(c) 2016 Cavium networks. All rights reserved.
+ * Copyright(c) 2016 Cavium, Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -12,7 +12,7 @@
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
- * * Neither the name of Cavium networks nor the names of its
+ * * Neither the name of Cavium, Inc nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
@@ -46,11 +46,10 @@ extern "C" {
#include <string.h>
+#include <rte_common.h>
#include <rte_dev.h>
-#include <rte_pci.h>
-#include <rte_malloc.h>
#include <rte_log.h>
-#include <rte_common.h>
+#include <rte_malloc.h>
#include "rte_eventdev.h"
@@ -87,60 +86,6 @@ extern "C" {
#define RTE_EVENTDEV_DETACHED (0)
#define RTE_EVENTDEV_ATTACHED (1)
-/**
- * Initialisation function of a event driver invoked for each matching
- * event PCI device detected during the PCI probing phase.
- *
- * @param dev
- * The dev pointer is the address of the *rte_eventdev* structure associated
- * with the matching device and which has been [automatically] allocated in
- * the *rte_event_devices* array.
- *
- * @return
- * - 0: Success, the device is properly initialised by the driver.
- * In particular, the driver MUST have set up the *dev_ops* pointer
- * of the *dev* structure.
- * - <0: Error code of the device initialisation failure.
- */
-typedef int (*eventdev_init_t)(struct rte_eventdev *dev);
-
-/**
- * Finalisation function of a driver invoked for each matching
- * PCI device detected during the PCI closing phase.
- *
- * @param dev
- * The dev pointer is the address of the *rte_eventdev* structure associated
- * with the matching device and which has been [automatically] allocated in
- * the *rte_event_devices* array.
- *
- * @return
- * - 0: Success, the device is properly finalised by the driver.
- * In particular, the driver MUST free the *dev_ops* pointer
- * of the *dev* structure.
- * - <0: Error code of the device initialisation failure.
- */
-typedef int (*eventdev_uninit_t)(struct rte_eventdev *dev);
-
-/**
- * The structure associated with a PMD driver.
- *
- * Each driver acts as a PCI driver and is represented by a generic
- * *event_driver* structure that holds:
- *
- * - An *rte_pci_driver* structure (which must be the first field).
- *
- * - The *eventdev_init* function invoked for each matching PCI device.
- *
- * - The size of the private data to allocate for each matching device.
- */
-struct rte_eventdev_driver {
- struct rte_pci_driver pci_drv; /**< The PMD is also a PCI driver. */
- unsigned int dev_private_size; /**< Size of device private data. */
-
- eventdev_init_t eventdev_init; /**< Device init function. */
- eventdev_uninit_t eventdev_uninit; /**< Device uninit function. */
-};
-
/** Global structure used for maintaining state of allocated event devices */
struct rte_eventdev_global {
uint8_t nb_devs; /**< Number of devices found */
@@ -550,48 +495,6 @@ rte_event_pmd_allocate(const char *name, int socket_id);
int
rte_event_pmd_release(struct rte_eventdev *eventdev);
-/**
- * Creates a new virtual event device and returns the pointer to that device.
- *
- * @param name
- * PMD type name
- * @param dev_private_size
- * Size of event PMDs private data
- * @param socket_id
- * Socket to allocate resources on.
- *
- * @return
- * - Eventdev pointer if device is successfully created.
- * - NULL if device cannot be created.
- */
-struct rte_eventdev *
-rte_event_pmd_vdev_init(const char *name, size_t dev_private_size,
- int socket_id);
-
-/**
- * Destroy the given virtual event device
- *
- * @param name
- * PMD type name
- * @return
- * - 0 on success, negative on error
- */
-int
-rte_event_pmd_vdev_uninit(const char *name);
-
-/**
- * Wrapper for use by pci drivers as a .probe function to attach to a event
- * interface.
- */
-int rte_event_pmd_pci_probe(struct rte_pci_driver *pci_drv,
- struct rte_pci_device *pci_dev);
-
-/**
- * Wrapper for use by pci drivers as a .remove function to detach a event
- * interface.
- */
-int rte_event_pmd_pci_remove(struct rte_pci_device *pci_dev);
-
#ifdef __cplusplus
}
#endif
diff --git a/lib/librte_eventdev/rte_eventdev_pmd_pci.h b/lib/librte_eventdev/rte_eventdev_pmd_pci.h
new file mode 100644
index 00000000..b6bd7319
--- /dev/null
+++ b/lib/librte_eventdev/rte_eventdev_pmd_pci.h
@@ -0,0 +1,162 @@
+/*
+ *
+ * Copyright(c) 2016-2017 Cavium, Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Cavium, Inc nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _RTE_EVENTDEV_PMD_PCI_H_
+#define _RTE_EVENTDEV_PMD_PCI_H_
+
+/** @file
+ * RTE Eventdev PCI PMD APIs
+ *
+ * @note
+ * These APIs are for the event PCI PMD only; user applications should not
+ * call them directly.
+ */
+
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <string.h>
+
+#include <rte_eal.h>
+#include <rte_lcore.h>
+#include <rte_pci.h>
+
+#include "rte_eventdev_pmd.h"
+
+typedef int (*eventdev_pmd_pci_callback_t)(struct rte_eventdev *dev);
+
+/**
+ * @internal
+ * Wrapper for use by pci drivers as a .probe function to attach to a event
+ * interface.
+ */
+static inline int
+rte_event_pmd_pci_probe(struct rte_pci_driver *pci_drv,
+ struct rte_pci_device *pci_dev,
+ size_t private_data_size,
+ eventdev_pmd_pci_callback_t devinit)
+{
+ struct rte_eventdev *eventdev;
+
+ char eventdev_name[RTE_EVENTDEV_NAME_MAX_LEN];
+
+ int retval;
+
+ if (devinit == NULL)
+ return -EINVAL;
+
+ rte_pci_device_name(&pci_dev->addr, eventdev_name,
+ sizeof(eventdev_name));
+
+ eventdev = rte_event_pmd_allocate(eventdev_name,
+ pci_dev->device.numa_node);
+ if (eventdev == NULL)
+ return -ENOMEM;
+
+ if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
+ eventdev->data->dev_private =
+ rte_zmalloc_socket(
+ "eventdev private structure",
+ private_data_size,
+ RTE_CACHE_LINE_SIZE,
+ rte_socket_id());
+
+ if (eventdev->data->dev_private == NULL)
+ rte_panic("Cannot allocate memzone for private "
+ "device data");
+ }
+
+ eventdev->dev = &pci_dev->device;
+
+ /* Invoke PMD device initialization function */
+ retval = devinit(eventdev);
+ if (retval == 0)
+ return 0;
+
+ RTE_EDEV_LOG_ERR("driver %s: (vendor_id=0x%x device_id=0x%x)"
+ " failed", pci_drv->driver.name,
+ (unsigned int) pci_dev->id.vendor_id,
+ (unsigned int) pci_dev->id.device_id);
+
+ rte_event_pmd_release(eventdev);
+
+ return -ENXIO;
+}
+
+
+/**
+ * @internal
+ * Wrapper for use by pci drivers as a .remove function to detach a event
+ * interface.
+ */
+static inline int
+rte_event_pmd_pci_remove(struct rte_pci_device *pci_dev,
+ eventdev_pmd_pci_callback_t devuninit)
+{
+ struct rte_eventdev *eventdev;
+ char eventdev_name[RTE_EVENTDEV_NAME_MAX_LEN];
+ int ret = 0;
+
+ if (pci_dev == NULL)
+ return -EINVAL;
+
+ rte_pci_device_name(&pci_dev->addr, eventdev_name,
+ sizeof(eventdev_name));
+
+ eventdev = rte_event_pmd_get_named_dev(eventdev_name);
+ if (eventdev == NULL)
+ return -ENODEV;
+
+ ret = rte_event_dev_close(eventdev->data->dev_id);
+ if (ret < 0)
+ return ret;
+
+ /* Invoke PMD device un-init function */
+ if (devuninit)
+ ret = devuninit(eventdev);
+ if (ret)
+ return ret;
+
+ /* Free event device */
+ rte_event_pmd_release(eventdev);
+
+ eventdev->dev = NULL;
+
+ return 0;
+}
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_EVENTDEV_PMD_PCI_H_ */
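
A sketch of how a PCI event PMD might wire its probe/remove through the
inline wrappers above; "my_eventdev_init", the private struct, and the
function names are hypothetical:

#include <rte_common.h>
#include <rte_eventdev_pmd_pci.h>

struct my_eventdev_private {
	int config;	/* PMD-specific state */
};

static int
my_eventdev_init(struct rte_eventdev *dev)
{
	/* a real PMD sets dev->dev_ops and the fast-path function
	 * pointers (enqueue_burst, dequeue_burst, ...) here */
	RTE_SET_USED(dev);
	return 0;
}

static int
my_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
{
	return rte_event_pmd_pci_probe(pci_drv, pci_dev,
			sizeof(struct my_eventdev_private),
			my_eventdev_init);
}

static int
my_pci_remove(struct rte_pci_device *pci_dev)
{
	/* NULL is allowed: devuninit is optional in the wrapper above */
	return rte_event_pmd_pci_remove(pci_dev, NULL);
}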
diff --git a/lib/librte_eventdev/rte_eventdev_pmd_vdev.h b/lib/librte_eventdev/rte_eventdev_pmd_vdev.h
new file mode 100644
index 00000000..135e8b80
--- /dev/null
+++ b/lib/librte_eventdev/rte_eventdev_pmd_vdev.h
@@ -0,0 +1,134 @@
+/*
+ *
+ * Copyright(c) 2016-2017 Cavium, Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Cavium, Inc nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _RTE_EVENTDEV_PMD_VDEV_H_
+#define _RTE_EVENTDEV_PMD_VDEV_H_
+
+/** @file
+ * RTE Eventdev VDEV PMD APIs
+ *
+ * @note
+ * These APIs are for the event VDEV PMD only; user applications should not
+ * call them directly.
+ */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <string.h>
+
+#include <rte_debug.h>
+#include <rte_eal.h>
+#include <rte_vdev.h>
+
+#include "rte_eventdev_pmd.h"
+
+/**
+ * @internal
+ * Creates a new virtual event device and returns the pointer to that device.
+ *
+ * @param name
+ * PMD type name
+ * @param dev_private_size
+ * Size of event PMDs private data
+ * @param socket_id
+ * Socket to allocate resources on.
+ *
+ * @return
+ * - Eventdev pointer if device is successfully created.
+ * - NULL if device cannot be created.
+ */
+static inline struct rte_eventdev *
+rte_event_pmd_vdev_init(const char *name, size_t dev_private_size,
+ int socket_id)
+{
+
+ struct rte_eventdev *eventdev;
+
+ /* Allocate device structure */
+ eventdev = rte_event_pmd_allocate(name, socket_id);
+ if (eventdev == NULL)
+ return NULL;
+
+ /* Allocate private device structure */
+ if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
+ eventdev->data->dev_private =
+ rte_zmalloc_socket("eventdev device private",
+ dev_private_size,
+ RTE_CACHE_LINE_SIZE,
+ socket_id);
+
+ if (eventdev->data->dev_private == NULL)
+ rte_panic("Cannot allocate memzone for private device"
+ " data");
+ }
+
+ return eventdev;
+}
+
+/**
+ * @internal
+ * Destroy the given virtual event device
+ *
+ * @param name
+ * PMD type name
+ * @return
+ * - 0 on success, negative on error
+ */
+static inline int
+rte_event_pmd_vdev_uninit(const char *name)
+{
+ int ret;
+ struct rte_eventdev *eventdev;
+
+ if (name == NULL)
+ return -EINVAL;
+
+ eventdev = rte_event_pmd_get_named_dev(name);
+ if (eventdev == NULL)
+ return -ENODEV;
+
+ ret = rte_event_dev_close(eventdev->data->dev_id);
+ if (ret < 0)
+ return ret;
+
+ /* Free the event device */
+ rte_event_pmd_release(eventdev);
+
+ return 0;
+}
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_EVENTDEV_PMD_VDEV_H_ */
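
A sketch of a virtual event PMD built on these helpers; "my_vdev", the
private structure, and the driver name are hypothetical:

#include <errno.h>
#include <rte_lcore.h>
#include <rte_vdev.h>
#include <rte_eventdev_pmd_vdev.h>

struct my_vdev_private {
	int config;	/* PMD-specific state */
};

static int
my_vdev_probe(struct rte_vdev_device *vdev)
{
	struct rte_eventdev *dev;

	dev = rte_event_pmd_vdev_init(rte_vdev_device_name(vdev),
			sizeof(struct my_vdev_private), rte_socket_id());
	if (dev == NULL)
		return -ENOMEM;

	/* a real PMD fills in dev->dev_ops and fast-path pointers here */
	return 0;
}

static int
my_vdev_remove(struct rte_vdev_device *vdev)
{
	return rte_event_pmd_vdev_uninit(rte_vdev_device_name(vdev));
}

static struct rte_vdev_driver my_vdev_drv = {
	.probe = my_vdev_probe,
	.remove = my_vdev_remove,
};
RTE_PMD_REGISTER_VDEV(event_my_vdev, my_vdev_drv);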
diff --git a/lib/librte_eventdev/rte_eventdev_version.map b/lib/librte_eventdev/rte_eventdev_version.map
index 1fa6b333..4c48e5f0 100644
--- a/lib/librte_eventdev/rte_eventdev_version.map
+++ b/lib/librte_eventdev/rte_eventdev_version.map
@@ -42,3 +42,12 @@ DPDK_17.05 {
local: *;
};
+
+DPDK_17.08 {
+ global:
+
+ rte_event_ring_create;
+ rte_event_ring_free;
+ rte_event_ring_init;
+ rte_event_ring_lookup;
+} DPDK_17.05;