Diffstat (limited to 'drivers/event/dpaa2')
-rw-r--r--   drivers/event/dpaa2/Makefile         |   8
-rw-r--r--   drivers/event/dpaa2/dpaa2_eventdev.c | 462
-rw-r--r--   drivers/event/dpaa2/dpaa2_eventdev.h |  24
-rw-r--r--   drivers/event/dpaa2/meson.build      |   5
4 files changed, 401 insertions, 98 deletions
diff --git a/drivers/event/dpaa2/Makefile b/drivers/event/dpaa2/Makefile
index 5e1a6320..e0134cc4 100644
--- a/drivers/event/dpaa2/Makefile
+++ b/drivers/event/dpaa2/Makefile
@@ -21,13 +21,19 @@ CFLAGS += -I$(RTE_SDK)/lib/librte_eal/linuxapp/eal
 LDLIBS += -lrte_eal -lrte_eventdev
 LDLIBS += -lrte_bus_fslmc -lrte_mempool_dpaa2 -lrte_pmd_dpaa2
 LDLIBS += -lrte_bus_vdev
+LDLIBS += -lrte_common_dpaax
 CFLAGS += -I$(RTE_SDK)/drivers/net/dpaa2
 CFLAGS += -I$(RTE_SDK)/drivers/net/dpaa2/mc
 
+ifeq ($(CONFIG_RTE_LIBRTE_SECURITY),y)
+LDLIBS += -lrte_pmd_dpaa2_sec
+CFLAGS += -I$(RTE_SDK)/drivers/crypto/dpaa2_sec
+endif
+
 # versioning export map
 EXPORT_MAP := rte_pmd_dpaa2_event_version.map
 
-LIBABIVER := 1
+LIBABIVER := 2
 
 # depends on fslmc bus which uses experimental API
 CFLAGS += -DALLOW_EXPERIMENTAL_API
diff --git a/drivers/event/dpaa2/dpaa2_eventdev.c b/drivers/event/dpaa2/dpaa2_eventdev.c
index ea1e5cc6..8d168b02 100644
--- a/drivers/event/dpaa2/dpaa2_eventdev.c
+++ b/drivers/event/dpaa2/dpaa2_eventdev.c
@@ -27,6 +27,7 @@
 #include <rte_pci.h>
 #include <rte_bus_vdev.h>
 #include <rte_ethdev_driver.h>
+#include <rte_cryptodev.h>
 #include <rte_event_eth_rx_adapter.h>
 
 #include <fslmc_vfio.h>
@@ -34,6 +35,9 @@
 #include <dpaa2_hw_mempool.h>
 #include <dpaa2_hw_dpio.h>
 #include <dpaa2_ethdev.h>
+#ifdef RTE_LIBRTE_SECURITY
+#include <dpaa2_sec_event.h>
+#endif
 #include "dpaa2_eventdev.h"
 #include "dpaa2_eventdev_logs.h"
 #include <portal/dpaa2_hw_pvt.h>
@@ -54,34 +58,63 @@ static uint16_t
 dpaa2_eventdev_enqueue_burst(void *port, const struct rte_event ev[],
 			     uint16_t nb_events)
 {
-	struct rte_eventdev *ev_dev =
-			((struct dpaa2_io_portal_t *)port)->eventdev;
-	struct dpaa2_eventdev *priv = ev_dev->data->dev_private;
+
+	struct dpaa2_port *dpaa2_portal = port;
+	struct dpaa2_dpio_dev *dpio_dev;
 	uint32_t queue_id = ev[0].queue_id;
-	struct evq_info_t *evq_info = &priv->evq_info[queue_id];
+	struct dpaa2_eventq *evq_info;
 	uint32_t fqid;
 	struct qbman_swp *swp;
 	struct qbman_fd fd_arr[MAX_TX_RING_SLOTS];
 	uint32_t loop, frames_to_send;
 	struct qbman_eq_desc eqdesc[MAX_TX_RING_SLOTS];
 	uint16_t num_tx = 0;
-	int ret;
-
-	RTE_SET_USED(port);
+	int i, n, ret;
+	uint8_t channel_index;
 
 	if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
+		/* Affine current thread context to a qman portal */
 		ret = dpaa2_affine_qbman_swp();
-		if (ret) {
+		if (ret < 0) {
 			DPAA2_EVENTDEV_ERR("Failure in affining portal");
 			return 0;
 		}
 	}
-
+	/* todo - dpaa2_portal shall have dpio_dev - no per thread variable */
+	dpio_dev = DPAA2_PER_LCORE_DPIO;
 	swp = DPAA2_PER_LCORE_PORTAL;
 
+	if (likely(dpaa2_portal->is_port_linked))
+		goto skip_linking;
+
+	/* Create mapping between portal and channel to receive packets */
+	for (i = 0; i < DPAA2_EVENT_MAX_QUEUES; i++) {
+		evq_info = &dpaa2_portal->evq_info[i];
+		if (!evq_info->event_port)
+			continue;
+
+		ret = dpio_add_static_dequeue_channel(dpio_dev->dpio,
+						      CMD_PRI_LOW,
+						      dpio_dev->token,
+						      evq_info->dpcon->dpcon_id,
+						      &channel_index);
+		if (ret < 0) {
+			DPAA2_EVENTDEV_ERR(
+				"Static dequeue config failed: err(%d)", ret);
+			goto err;
+		}
+
+		qbman_swp_push_set(swp, channel_index, 1);
+		evq_info->dpcon->channel_index = channel_index;
+	}
+	dpaa2_portal->is_port_linked = true;
+
+skip_linking:
+	evq_info = &dpaa2_portal->evq_info[queue_id];
+
 	while (nb_events) {
-		frames_to_send = (nb_events >> 3) ?
-			MAX_TX_RING_SLOTS : nb_events;
+		frames_to_send = (nb_events > dpaa2_eqcr_size) ?
+			dpaa2_eqcr_size : nb_events;
 
 		for (loop = 0; loop < frames_to_send; loop++) {
 			const struct rte_event *event = &ev[num_tx + loop];
@@ -99,14 +132,14 @@ dpaa2_eventdev_enqueue_burst(void *port, const struct rte_event ev[],
 			qbman_eq_desc_set_no_orp(&eqdesc[loop], 0);
 			qbman_eq_desc_set_response(&eqdesc[loop], 0, 0);
 
-			if (event->mbuf->seqn) {
+			if (event->sched_type == RTE_SCHED_TYPE_ATOMIC
+				&& event->mbuf->seqn) {
 				uint8_t dqrr_index = event->mbuf->seqn - 1;
 
 				qbman_eq_desc_set_dca(&eqdesc[loop], 1,
 						      dqrr_index, 0);
 				DPAA2_PER_LCORE_DQRR_SIZE--;
-				DPAA2_PER_LCORE_DQRR_HELD &=
-					~(1 << dqrr_index);
+				DPAA2_PER_LCORE_DQRR_HELD &= ~(1 << dqrr_index);
 			}
 
 			memset(&fd_arr[loop], 0, sizeof(struct qbman_fd));
@@ -116,7 +149,7 @@ dpaa2_eventdev_enqueue_burst(void *port, const struct rte_event ev[],
 			 * to avoid copy
 			 */
 			struct rte_event *ev_temp = rte_malloc(NULL,
-				sizeof(struct rte_event), 0);
+						sizeof(struct rte_event), 0);
 
 			if (!ev_temp) {
 				if (!loop)
@@ -143,6 +176,18 @@ send_partial:
 	}
 
 	return num_tx;
+err:
+	for (n = 0; n < i; n++) {
+		evq_info = &dpaa2_portal->evq_info[n];
+		if (!evq_info->event_port)
+			continue;
+		qbman_swp_push_set(swp, evq_info->dpcon->channel_index, 0);
+		dpio_remove_static_dequeue_channel(dpio_dev->dpio, 0,
+					dpio_dev->token,
+					evq_info->dpcon->dpcon_id);
+	}
+	return 0;
+
 }
 
 static uint16_t
@@ -197,6 +242,7 @@ static void dpaa2_eventdev_process_atomic(struct qbman_swp *swp,
 	ev->mbuf->seqn = dqrr_index + 1;
 	DPAA2_PER_LCORE_DQRR_SIZE++;
 	DPAA2_PER_LCORE_DQRR_HELD |= 1 << dqrr_index;
+	DPAA2_PER_LCORE_DQRR_MBUF(dqrr_index) = ev->mbuf;
 }
 
 static uint16_t
@@ -204,22 +250,53 @@ dpaa2_eventdev_dequeue_burst(void *port, struct rte_event ev[],
 			     uint16_t nb_events, uint64_t timeout_ticks)
 {
 	const struct qbman_result *dq;
+	struct dpaa2_dpio_dev *dpio_dev = NULL;
+	struct dpaa2_port *dpaa2_portal = port;
+	struct dpaa2_eventq *evq_info;
 	struct qbman_swp *swp;
 	const struct qbman_fd *fd;
 	struct dpaa2_queue *rxq;
-	int num_pkts = 0, ret, i = 0;
-
-	RTE_SET_USED(port);
+	int num_pkts = 0, ret, i = 0, n;
+	uint8_t channel_index;
 
 	if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
+		/* Affine current thread context to a qman portal */
 		ret = dpaa2_affine_qbman_swp();
-		if (ret) {
+		if (ret < 0) {
 			DPAA2_EVENTDEV_ERR("Failure in affining portal");
 			return 0;
 		}
 	}
+
+	dpio_dev = DPAA2_PER_LCORE_DPIO;
 	swp = DPAA2_PER_LCORE_PORTAL;
 
+	if (likely(dpaa2_portal->is_port_linked))
+		goto skip_linking;
+
+	/* Create mapping between portal and channel to receive packets */
+	for (i = 0; i < DPAA2_EVENT_MAX_QUEUES; i++) {
+		evq_info = &dpaa2_portal->evq_info[i];
+		if (!evq_info->event_port)
+			continue;
+
+		ret = dpio_add_static_dequeue_channel(dpio_dev->dpio,
+						      CMD_PRI_LOW,
+						      dpio_dev->token,
+						      evq_info->dpcon->dpcon_id,
+						      &channel_index);
+		if (ret < 0) {
+			DPAA2_EVENTDEV_ERR(
+				"Static dequeue config failed: err(%d)", ret);
+			goto err;
+		}
+
+		qbman_swp_push_set(swp, channel_index, 1);
+		evq_info->dpcon->channel_index = channel_index;
+	}
+	dpaa2_portal->is_port_linked = true;
+
+skip_linking:
 	/* Check if there are atomic contexts to be released */
 	while (DPAA2_PER_LCORE_DQRR_SIZE) {
 		if (DPAA2_PER_LCORE_DQRR_HELD & (1 << i)) {
@@ -258,6 +335,18 @@ dpaa2_eventdev_dequeue_burst(void *port, struct rte_event ev[],
 	} while (num_pkts < nb_events);
 
 	return num_pkts;
+err:
+	for (n = 0; n < i; n++) {
+		evq_info = &dpaa2_portal->evq_info[n];
+		if (!evq_info->event_port)
+			continue;
+
+		qbman_swp_push_set(swp, evq_info->dpcon->channel_index, 0);
+		dpio_remove_static_dequeue_channel(dpio_dev->dpio, 0,
+						dpio_dev->token,
+						evq_info->dpcon->dpcon_id);
+	}
+	return 0;
 }
 
 static uint16_t
@@ -283,7 +372,7 @@ dpaa2_eventdev_info_get(struct rte_eventdev *dev,
 	dev_info->max_dequeue_timeout_ns =
 		DPAA2_EVENT_MAX_DEQUEUE_TIMEOUT;
 	dev_info->dequeue_timeout_ns =
-		DPAA2_EVENT_MIN_DEQUEUE_TIMEOUT;
+		DPAA2_EVENT_PORT_DEQUEUE_TIMEOUT_NS;
 	dev_info->max_event_queues = priv->max_event_queues;
 	dev_info->max_event_queue_flows =
 		DPAA2_EVENT_MAX_QUEUE_FLOWS;
@@ -292,6 +381,9 @@ dpaa2_eventdev_info_get(struct rte_eventdev *dev,
 	dev_info->max_event_priority_levels =
 		DPAA2_EVENT_MAX_EVENT_PRIORITY_LEVELS;
 	dev_info->max_event_ports = rte_fslmc_get_device_count(DPAA2_IO);
+	/* we only support dpio upto number of cores*/
+	if (dev_info->max_event_ports > rte_lcore_count())
+		dev_info->max_event_ports = rte_lcore_count();
 	dev_info->max_event_port_dequeue_depth =
 		DPAA2_EVENT_MAX_PORT_DEQUEUE_DEPTH;
 	dev_info->max_event_port_enqueue_depth =
@@ -313,7 +405,6 @@ dpaa2_eventdev_configure(const struct rte_eventdev *dev)
 
 	EVENTDEV_INIT_FUNC_TRACE();
 
-	priv->dequeue_timeout_ns = conf->dequeue_timeout_ns;
 	priv->nb_event_queues = conf->nb_event_queues;
 	priv->nb_event_ports = conf->nb_event_ports;
 	priv->nb_event_queue_flows = conf->nb_event_queue_flows;
@@ -321,6 +412,20 @@ dpaa2_eventdev_configure(const struct rte_eventdev *dev)
 	priv->nb_event_port_enqueue_depth = conf->nb_event_port_enqueue_depth;
 	priv->event_dev_cfg = conf->event_dev_cfg;
 
+	/* Check dequeue timeout method is per dequeue or global */
+	if (priv->event_dev_cfg & RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT) {
+		/*
+		 * Use timeout value as given in dequeue operation.
+		 * So invalidating this timeout value.
+		 */
+		priv->dequeue_timeout_ns = 0;
+
+	} else if (conf->dequeue_timeout_ns == 0) {
+		priv->dequeue_timeout_ns = DPAA2_EVENT_PORT_DEQUEUE_TIMEOUT_NS;
+	} else {
+		priv->dequeue_timeout_ns = conf->dequeue_timeout_ns;
+	}
+
 	DPAA2_EVENTDEV_DEBUG("Configured eventdev devid=%d",
 			     dev->data->dev_id);
 	return 0;
@@ -370,31 +475,39 @@ dpaa2_eventdev_queue_def_conf(struct rte_eventdev *dev, uint8_t queue_id,
 	queue_conf->priority = RTE_EVENT_DEV_PRIORITY_NORMAL;
 }
 
-static void
-dpaa2_eventdev_queue_release(struct rte_eventdev *dev, uint8_t queue_id)
-{
-	EVENTDEV_INIT_FUNC_TRACE();
-
-	RTE_SET_USED(dev);
-	RTE_SET_USED(queue_id);
-}
-
 static int
 dpaa2_eventdev_queue_setup(struct rte_eventdev *dev, uint8_t queue_id,
 			   const struct rte_event_queue_conf *queue_conf)
 {
 	struct dpaa2_eventdev *priv = dev->data->dev_private;
-	struct evq_info_t *evq_info =
-		&priv->evq_info[queue_id];
+	struct dpaa2_eventq *evq_info = &priv->evq_info[queue_id];
 
 	EVENTDEV_INIT_FUNC_TRACE();
 
+	switch (queue_conf->schedule_type) {
+	case RTE_SCHED_TYPE_PARALLEL:
+	case RTE_SCHED_TYPE_ATOMIC:
+		break;
+	case RTE_SCHED_TYPE_ORDERED:
+		DPAA2_EVENTDEV_ERR("Schedule type is not supported.");
+		return -1;
+	}
 	evq_info->event_queue_cfg = queue_conf->event_queue_cfg;
+	evq_info->event_queue_id = queue_id;
 
 	return 0;
 }
 
 static void
+dpaa2_eventdev_queue_release(struct rte_eventdev *dev, uint8_t queue_id)
+{
+	EVENTDEV_INIT_FUNC_TRACE();
+
+	RTE_SET_USED(dev);
+	RTE_SET_USED(queue_id);
+}
+
+static void
 dpaa2_eventdev_port_def_conf(struct rte_eventdev *dev, uint8_t port_id,
 			     struct rte_event_port_conf *port_conf)
 {
@@ -402,7 +515,6 @@ dpaa2_eventdev_port_def_conf(struct rte_eventdev *dev, uint8_t port_id,
 
 	RTE_SET_USED(dev);
 	RTE_SET_USED(port_id);
-	RTE_SET_USED(port_conf);
 
 	port_conf->new_event_threshold =
 		DPAA2_EVENT_MAX_NUM_EVENTS;
@@ -413,56 +525,44 @@ dpaa2_eventdev_port_def_conf(struct rte_eventdev *dev, uint8_t port_id,
 	port_conf->disable_implicit_release = 0;
 }
 
-static void
-dpaa2_eventdev_port_release(void *port)
-{
-	EVENTDEV_INIT_FUNC_TRACE();
-
-	RTE_SET_USED(port);
-}
-
 static int
 dpaa2_eventdev_port_setup(struct rte_eventdev *dev, uint8_t port_id,
 			  const struct rte_event_port_conf *port_conf)
 {
+	char event_port_name[32];
+	struct dpaa2_port *portal;
+
 	EVENTDEV_INIT_FUNC_TRACE();
 
 	RTE_SET_USED(port_conf);
 
-	if (!dpaa2_io_portal[port_id].dpio_dev) {
-		dpaa2_io_portal[port_id].dpio_dev =
-				dpaa2_get_qbman_swp(port_id);
-		rte_atomic16_inc(&dpaa2_io_portal[port_id].dpio_dev->ref_count);
-		if (!dpaa2_io_portal[port_id].dpio_dev)
-			return -1;
+	sprintf(event_port_name, "event-port-%d", port_id);
+	portal = rte_malloc(event_port_name, sizeof(struct dpaa2_port), 0);
+	if (!portal) {
+		DPAA2_EVENTDEV_ERR("Memory allocation failure");
+		return -ENOMEM;
 	}
 
-	dpaa2_io_portal[port_id].eventdev = dev;
-	dev->data->ports[port_id] = &dpaa2_io_portal[port_id];
+	memset(portal, 0, sizeof(struct dpaa2_port));
+	dev->data->ports[port_id] = portal;
+
 	return 0;
 }
 
-static int
-dpaa2_eventdev_port_unlink(struct rte_eventdev *dev, void *port,
-			   uint8_t queues[], uint16_t nb_unlinks)
+static void
+dpaa2_eventdev_port_release(void *port)
 {
-	struct dpaa2_eventdev *priv = dev->data->dev_private;
-	struct dpaa2_io_portal_t *dpaa2_portal = port;
-	struct evq_info_t *evq_info;
-	int i;
+	struct dpaa2_port *portal = port;
 
 	EVENTDEV_INIT_FUNC_TRACE();
 
-	for (i = 0; i < nb_unlinks; i++) {
-		evq_info = &priv->evq_info[queues[i]];
-		qbman_swp_push_set(dpaa2_portal->dpio_dev->sw_portal,
-				   evq_info->dpcon->channel_index, 0);
-		dpio_remove_static_dequeue_channel(dpaa2_portal->dpio_dev->dpio,
-					0, dpaa2_portal->dpio_dev->token,
-			evq_info->dpcon->dpcon_id);
-	}
+	/* TODO: Cleanup is required when ports are in linked state. */
+	if (portal->is_port_linked)
+		DPAA2_EVENTDEV_WARN("Event port must be unlinked before release");
 
-	return (int)nb_unlinks;
+	if (portal)
+		rte_free(portal);
+
+	portal = NULL;
 }
 
 static int
@@ -471,51 +571,71 @@ dpaa2_eventdev_port_link(struct rte_eventdev *dev, void *port,
 			 uint16_t nb_links)
 {
 	struct dpaa2_eventdev *priv = dev->data->dev_private;
-	struct dpaa2_io_portal_t *dpaa2_portal = port;
-	struct evq_info_t *evq_info;
-	uint8_t channel_index;
-	int ret, i, n;
+	struct dpaa2_port *dpaa2_portal = port;
+	struct dpaa2_eventq *evq_info;
+	uint16_t i;
 
 	EVENTDEV_INIT_FUNC_TRACE();
 
+	RTE_SET_USED(priorities);
+
 	for (i = 0; i < nb_links; i++) {
 		evq_info = &priv->evq_info[queues[i]];
+		memcpy(&dpaa2_portal->evq_info[queues[i]], evq_info,
+			sizeof(struct dpaa2_eventq));
+		dpaa2_portal->evq_info[queues[i]].event_port = port;
+		dpaa2_portal->num_linked_evq++;
+	}
 
-		ret = dpio_add_static_dequeue_channel(
-			dpaa2_portal->dpio_dev->dpio,
-			CMD_PRI_LOW, dpaa2_portal->dpio_dev->token,
-			evq_info->dpcon->dpcon_id, &channel_index);
-		if (ret < 0) {
-			DPAA2_EVENTDEV_ERR(
-				"Static dequeue config failed: err(%d)", ret);
-			goto err;
-		}
+	return (int)nb_links;
+}
 
-		qbman_swp_push_set(dpaa2_portal->dpio_dev->sw_portal,
-				   channel_index, 1);
-		evq_info->dpcon->channel_index = channel_index;
-	}
+static int
+dpaa2_eventdev_port_unlink(struct rte_eventdev *dev, void *port,
+			   uint8_t queues[], uint16_t nb_unlinks)
+{
+	struct dpaa2_port *dpaa2_portal = port;
+	int i;
+	struct dpaa2_dpio_dev *dpio_dev = NULL;
+	struct dpaa2_eventq *evq_info;
+	struct qbman_swp *swp;
 
-	RTE_SET_USED(priorities);
+	EVENTDEV_INIT_FUNC_TRACE();
 
-	return (int)nb_links;
-err:
-	for (n = 0; n < i; n++) {
-		evq_info = &priv->evq_info[queues[n]];
-		qbman_swp_push_set(dpaa2_portal->dpio_dev->sw_portal,
-				   evq_info->dpcon->channel_index, 0);
-		dpio_remove_static_dequeue_channel(dpaa2_portal->dpio_dev->dpio,
-					0, dpaa2_portal->dpio_dev->token,
-			evq_info->dpcon->dpcon_id);
+	RTE_SET_USED(dev);
+	RTE_SET_USED(queues);
+
+	for (i = 0; i < nb_unlinks; i++) {
+		evq_info = &dpaa2_portal->evq_info[queues[i]];
+
+		if (DPAA2_PER_LCORE_DPIO && evq_info->dpcon) {
+			/* todo dpaa2_portal shall have dpio_dev-no per lcore*/
+			dpio_dev = DPAA2_PER_LCORE_DPIO;
+			swp = DPAA2_PER_LCORE_PORTAL;
+
+			qbman_swp_push_set(swp,
+					evq_info->dpcon->channel_index, 0);
+			dpio_remove_static_dequeue_channel(dpio_dev->dpio, 0,
+						dpio_dev->token,
+						evq_info->dpcon->dpcon_id);
+		}
+		memset(evq_info, 0, sizeof(struct dpaa2_eventq));
+		if (dpaa2_portal->num_linked_evq)
+			dpaa2_portal->num_linked_evq--;
 	}
-	return ret;
+
+	if (!dpaa2_portal->num_linked_evq)
+		dpaa2_portal->is_port_linked = false;
+
+	return (int)nb_unlinks;
 }
 
+
 static int
 dpaa2_eventdev_timeout_ticks(struct rte_eventdev *dev, uint64_t ns,
 			     uint64_t *timeout_ticks)
 {
-	uint32_t scale = 1;
+	uint32_t scale = 1000*1000;
 
 	EVENTDEV_INIT_FUNC_TRACE();
@@ -677,6 +797,151 @@ dpaa2_eventdev_eth_stop(const struct rte_eventdev *dev,
 	return 0;
 }
 
+#ifdef RTE_LIBRTE_SECURITY
+static int
+dpaa2_eventdev_crypto_caps_get(const struct rte_eventdev *dev,
+			    const struct rte_cryptodev *cdev,
+			    uint32_t *caps)
+{
+	const char *name = cdev->data->name;
+
+	EVENTDEV_INIT_FUNC_TRACE();
+
+	RTE_SET_USED(dev);
+
+	if (!strncmp(name, "dpsec-", 6))
+		*caps = RTE_EVENT_CRYPTO_ADAPTER_DPAA2_CAP;
+	else
+		return -1;
+
+	return 0;
+}
+
+static int
+dpaa2_eventdev_crypto_queue_add_all(const struct rte_eventdev *dev,
+		const struct rte_cryptodev *cryptodev,
+		const struct rte_event *ev)
+{
+	struct dpaa2_eventdev *priv = dev->data->dev_private;
+	uint8_t ev_qid = ev->queue_id;
+	uint16_t dpcon_id = priv->evq_info[ev_qid].dpcon->dpcon_id;
+	int i, ret;
+
+	EVENTDEV_INIT_FUNC_TRACE();
+
+	for (i = 0; i < cryptodev->data->nb_queue_pairs; i++) {
+		ret = dpaa2_sec_eventq_attach(cryptodev, i,
+				dpcon_id, ev);
+		if (ret) {
+			DPAA2_EVENTDEV_ERR("dpaa2_sec_eventq_attach failed: ret %d\n",
+				    ret);
+			goto fail;
+		}
+	}
+	return 0;
+fail:
+	for (i = (i - 1); i >= 0 ; i--)
+		dpaa2_sec_eventq_detach(cryptodev, i);
+
+	return ret;
+}
+
+static int
+dpaa2_eventdev_crypto_queue_add(const struct rte_eventdev *dev,
+		const struct rte_cryptodev *cryptodev,
+		int32_t rx_queue_id,
+		const struct rte_event *ev)
+{
+	struct dpaa2_eventdev *priv = dev->data->dev_private;
+	uint8_t ev_qid = ev->queue_id;
+	uint16_t dpcon_id = priv->evq_info[ev_qid].dpcon->dpcon_id;
+	int ret;
+
+	EVENTDEV_INIT_FUNC_TRACE();
+
+	if (rx_queue_id == -1)
+		return dpaa2_eventdev_crypto_queue_add_all(dev,
+				cryptodev, ev);
+
+	ret = dpaa2_sec_eventq_attach(cryptodev, rx_queue_id,
+			dpcon_id, ev);
+	if (ret) {
+		DPAA2_EVENTDEV_ERR(
+			"dpaa2_sec_eventq_attach failed: ret: %d\n", ret);
+		return ret;
+	}
+	return 0;
+}
+
+static int
+dpaa2_eventdev_crypto_queue_del_all(const struct rte_eventdev *dev,
+			     const struct rte_cryptodev *cdev)
+{
+	int i, ret;
+
+	EVENTDEV_INIT_FUNC_TRACE();
+
+	RTE_SET_USED(dev);
+
+	for (i = 0; i < cdev->data->nb_queue_pairs; i++) {
+		ret = dpaa2_sec_eventq_detach(cdev, i);
+		if (ret) {
+			DPAA2_EVENTDEV_ERR(
+				"dpaa2_sec_eventq_detach failed:ret %d\n", ret);
+			return ret;
+		}
+	}
+
+	return 0;
+}
+
+static int
+dpaa2_eventdev_crypto_queue_del(const struct rte_eventdev *dev,
+			     const struct rte_cryptodev *cryptodev,
+			     int32_t rx_queue_id)
+{
+	int ret;
+
+	EVENTDEV_INIT_FUNC_TRACE();
+
+	if (rx_queue_id == -1)
+		return dpaa2_eventdev_crypto_queue_del_all(dev, cryptodev);
+
+	ret = dpaa2_sec_eventq_detach(cryptodev, rx_queue_id);
+	if (ret) {
+		DPAA2_EVENTDEV_ERR(
+			"dpaa2_sec_eventq_detach failed: ret: %d\n", ret);
+		return ret;
+	}
+
+	return 0;
+}
+
+static int
+dpaa2_eventdev_crypto_start(const struct rte_eventdev *dev,
+			    const struct rte_cryptodev *cryptodev)
+{
+	EVENTDEV_INIT_FUNC_TRACE();
+
+	RTE_SET_USED(dev);
+	RTE_SET_USED(cryptodev);
+
+	return 0;
+}
+
+static int
+dpaa2_eventdev_crypto_stop(const struct rte_eventdev *dev,
+			   const struct rte_cryptodev *cryptodev)
+{
+	EVENTDEV_INIT_FUNC_TRACE();
+
+	RTE_SET_USED(dev);
+	RTE_SET_USED(cryptodev);
+
+	return 0;
+}
+#endif
+
 static struct rte_eventdev_ops dpaa2_eventdev_ops = {
 	.dev_infos_get    = dpaa2_eventdev_info_get,
 	.dev_configure    = dpaa2_eventdev_configure,
@@ -698,6 +963,13 @@ static struct rte_eventdev_ops dpaa2_eventdev_ops = {
 	.eth_rx_adapter_queue_del = dpaa2_eventdev_eth_queue_del,
 	.eth_rx_adapter_start = dpaa2_eventdev_eth_start,
 	.eth_rx_adapter_stop = dpaa2_eventdev_eth_stop,
+#ifdef RTE_LIBRTE_SECURITY
+	.crypto_adapter_caps_get = dpaa2_eventdev_crypto_caps_get,
+	.crypto_adapter_queue_pair_add = dpaa2_eventdev_crypto_queue_add,
+	.crypto_adapter_queue_pair_del = dpaa2_eventdev_crypto_queue_del,
+	.crypto_adapter_start = dpaa2_eventdev_crypto_start,
+	.crypto_adapter_stop = dpaa2_eventdev_crypto_stop,
+#endif
 };
 
 static int
@@ -789,6 +1061,8 @@ dpaa2_eventdev_create(const char *name)
 		priv->max_event_queues++;
 	} while (dpcon_dev && dpci_dev);
 
+	RTE_LOG(INFO, PMD, "%s eventdev created\n", name);
+
 	return 0;
 fail:
 	return -EFAULT;
diff --git a/drivers/event/dpaa2/dpaa2_eventdev.h b/drivers/event/dpaa2/dpaa2_eventdev.h
index 229f66af..c847b3ea 100644
--- a/drivers/event/dpaa2/dpaa2_eventdev.h
+++ b/drivers/event/dpaa2/dpaa2_eventdev.h
@@ -21,6 +21,7 @@
 #define DPAA2_EVENT_MAX_QUEUES			16
 #define DPAA2_EVENT_MIN_DEQUEUE_TIMEOUT		1
 #define DPAA2_EVENT_MAX_DEQUEUE_TIMEOUT		(UINT32_MAX - 1)
+#define DPAA2_EVENT_PORT_DEQUEUE_TIMEOUT_NS	100UL
 #define DPAA2_EVENT_MAX_QUEUE_FLOWS		2048
 #define DPAA2_EVENT_MAX_QUEUE_PRIORITY_LEVELS	8
 #define DPAA2_EVENT_MAX_EVENT_PRIORITY_LEVELS	0
@@ -41,6 +42,15 @@ enum {
 	(RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT | \
 	RTE_EVENT_ETH_RX_ADAPTER_CAP_MULTI_EVENTQ | \
 	RTE_EVENT_ETH_RX_ADAPTER_CAP_OVERRIDE_FLOW_ID)
+
+/**< Crypto Rx adapter cap to return If the packet transfers from
+ * the cryptodev to eventdev with DPAA2 devices.
+ */
+#define RTE_EVENT_CRYPTO_ADAPTER_DPAA2_CAP \
+	(RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_NEW | \
+	RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_QP_EV_BIND | \
+	RTE_EVENT_CRYPTO_ADAPTER_CAP_SESSION_PRIVATE_DATA)
+
 /**< Ethernet Rx adapter cap to return If the packet transfers from
  * the ethdev to eventdev with DPAA2 devices.
  */
@@ -56,17 +66,27 @@ struct dpaa2_dpcon_dev {
 	uint8_t channel_index;
 };
 
-struct evq_info_t {
+struct dpaa2_eventq {
 	/* DPcon device */
 	struct dpaa2_dpcon_dev *dpcon;
 	/* Attached DPCI device */
 	struct dpaa2_dpci_dev *dpci;
+	/* Mapped event port */
+	struct dpaa2_io_portal_t *event_port;
 	/* Configuration provided by the user */
 	uint32_t event_queue_cfg;
+	uint32_t event_queue_id;
+};
+
+struct dpaa2_port {
+	struct dpaa2_eventq evq_info[DPAA2_EVENT_MAX_QUEUES];
+	uint8_t num_linked_evq;
+	uint8_t is_port_linked;
+	uint64_t timeout_us;
 };
 
 struct dpaa2_eventdev {
-	struct evq_info_t evq_info[DPAA2_EVENT_MAX_QUEUES];
+	struct dpaa2_eventq evq_info[DPAA2_EVENT_MAX_QUEUES];
 	uint32_t dequeue_timeout_ns;
 	uint8_t max_event_queues;
 	uint8_t nb_event_queues;
diff --git a/drivers/event/dpaa2/meson.build b/drivers/event/dpaa2/meson.build
index de7a4615..a0db6fc2 100644
--- a/drivers/event/dpaa2/meson.build
+++ b/drivers/event/dpaa2/meson.build
@@ -1,11 +1,14 @@
 # SPDX-License-Identifier: BSD-3-Clause
 # Copyright 2018 NXP
 
+version = 2
+
 if host_machine.system() != 'linux'
 	build = false
 endif
-deps += ['bus_vdev', 'pmd_dpaa2']
+deps += ['bus_vdev', 'pmd_dpaa2', 'pmd_dpaa2_sec']
 sources = files('dpaa2_hw_dpcon.c',
 		'dpaa2_eventdev.c')
 
 allow_experimental_apis = true
+includes += include_directories('../../crypto/dpaa2_sec/')