From ca33590b6af032bff57d9cc70455660466a654b2 Mon Sep 17 00:00:00 2001 From: Luca Boccassi Date: Mon, 19 Feb 2018 11:16:57 +0000 Subject: New upstream version 18.02 Change-Id: I89ed24cb2a49b78fe5be6970b99dd46c1499fcc3 Signed-off-by: Luca Boccassi --- drivers/net/dpaa/Makefile | 35 +-- drivers/net/dpaa/dpaa_ethdev.c | 462 ++++++++++++++++++++++++------ drivers/net/dpaa/dpaa_ethdev.h | 82 +++--- drivers/net/dpaa/dpaa_rxtx.c | 367 ++++++++++++++++++------ drivers/net/dpaa/dpaa_rxtx.h | 36 +-- drivers/net/dpaa/rte_pmd_dpaa.h | 39 +++ drivers/net/dpaa/rte_pmd_dpaa_version.map | 10 + 7 files changed, 762 insertions(+), 269 deletions(-) create mode 100644 drivers/net/dpaa/rte_pmd_dpaa.h diff --git a/drivers/net/dpaa/Makefile b/drivers/net/dpaa/Makefile index 171686ec..9c2a5ea8 100644 --- a/drivers/net/dpaa/Makefile +++ b/drivers/net/dpaa/Makefile @@ -1,32 +1,6 @@ -# BSD LICENSE +# SPDX-License-Identifier: BSD-3-Clause +# Copyright 2017 NXP # -# Copyright 2017 NXP. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions -# are met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above copyright -# notice, this list of conditions and the following disclaimer in -# the documentation and/or other materials provided with the -# distribution. -# * Neither the name of NXP nor the names of its -# contributors may be used to endorse or promote products derived -# from this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
include $(RTE_SDK)/mk/rte.vars.mk RTE_SDK_DPAA=$(RTE_SDK)/drivers/net/dpaa @@ -43,7 +17,9 @@ CFLAGS += -I$(RTE_SDK_DPAA)/ CFLAGS += -I$(RTE_SDK_DPAA)/include CFLAGS += -I$(RTE_SDK)/drivers/bus/dpaa CFLAGS += -I$(RTE_SDK)/drivers/bus/dpaa/include/ +CFLAGS += -I$(RTE_SDK)/drivers/bus/dpaa/base/qbman CFLAGS += -I$(RTE_SDK)/drivers/mempool/dpaa +CFLAGS += -I$(RTE_SDK)/drivers/event/dpaa CFLAGS += -I$(RTE_SDK)/lib/librte_eal/common/include CFLAGS += -I$(RTE_SDK)/lib/librte_eal/linuxapp/eal/include @@ -60,4 +36,7 @@ LDLIBS += -lrte_mempool_dpaa LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring LDLIBS += -lrte_ethdev -lrte_net -lrte_kvargs +# install this header file +SYMLINK-$(CONFIG_RTE_LIBRTE_DPAA_PMD)-include := rte_pmd_dpaa.h + include $(RTE_SDK)/mk/rte.lib.mk diff --git a/drivers/net/dpaa/dpaa_ethdev.c b/drivers/net/dpaa/dpaa_ethdev.c index cf5a2ecf..9b69ef45 100644 --- a/drivers/net/dpaa/dpaa_ethdev.c +++ b/drivers/net/dpaa/dpaa_ethdev.c @@ -1,34 +1,8 @@ -/*- - * BSD LICENSE +/* SPDX-License-Identifier: BSD-3-Clause * * Copyright 2016 Freescale Semiconductor, Inc. All rights reserved. - * Copyright 2017 NXP. + * Copyright 2017 NXP * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Freescale Semiconductor, Inc nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /* System headers */ #include @@ -54,7 +28,7 @@ #include #include #include -#include +#include #include #include @@ -64,6 +38,7 @@ #include #include +#include #include #include @@ -72,6 +47,17 @@ /* Keep track of whether QMAN and BMAN have been globally initialized */ static int is_global_init; +/* At present we only allow up to 4 push mode queues - as each of these queues + * needs a dedicated portal and we are short of portals.
+ */ +#define DPAA_MAX_PUSH_MODE_QUEUE 4 + +static int dpaa_push_mode_max_queue = DPAA_MAX_PUSH_MODE_QUEUE; +static int dpaa_push_queue_idx; /* Queue index of queues in push mode */ + + +/* Per FQ Taildrop in frame count */ +static unsigned int td_threshold = CGR_RX_PERFQ_THRESH; struct rte_dpaa_xstats_name_off { char name[RTE_ETH_XSTATS_NAME_SIZE]; @@ -107,23 +93,42 @@ static const struct rte_dpaa_xstats_name_off dpaa_xstats_strings[] = { offsetof(struct dpaa_if_stats, tund)}, }; +static struct rte_dpaa_driver rte_dpaa_pmd; + +static inline void +dpaa_poll_queue_default_config(struct qm_mcc_initfq *opts) +{ + memset(opts, 0, sizeof(struct qm_mcc_initfq)); + opts->we_mask = QM_INITFQ_WE_FQCTRL | QM_INITFQ_WE_CONTEXTA; + opts->fqd.fq_ctrl = QM_FQCTRL_AVOIDBLOCK | QM_FQCTRL_CTXASTASHING | + QM_FQCTRL_PREFERINCACHE; + opts->fqd.context_a.stashing.exclusive = 0; + if (dpaa_svr_family != SVR_LS1046A_FAMILY) + opts->fqd.context_a.stashing.annotation_cl = + DPAA_IF_RX_ANNOTATION_STASH; + opts->fqd.context_a.stashing.data_cl = DPAA_IF_RX_DATA_STASH; + opts->fqd.context_a.stashing.context_cl = DPAA_IF_RX_CONTEXT_STASH; +} + static int dpaa_mtu_set(struct rte_eth_dev *dev, uint16_t mtu) { struct dpaa_if *dpaa_intf = dev->data->dev_private; + uint32_t frame_size = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN + + VLAN_TAG_SIZE; PMD_INIT_FUNC_TRACE(); - if (mtu < ETHER_MIN_MTU) + if (mtu < ETHER_MIN_MTU || frame_size > DPAA_MAX_RX_PKT_LEN) return -EINVAL; - if (mtu > ETHER_MAX_LEN) + if (frame_size > ETHER_MAX_LEN) dev->data->dev_conf.rxmode.jumbo_frame = 1; else dev->data->dev_conf.rxmode.jumbo_frame = 0; - dev->data->dev_conf.rxmode.max_rx_pkt_len = mtu; + dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size; - fman_if_set_maxfrm(dpaa_intf->fif, mtu); + fman_if_set_maxfrm(dpaa_intf->fif, frame_size); return 0; } @@ -131,15 +136,19 @@ dpaa_mtu_set(struct rte_eth_dev *dev, uint16_t mtu) static int dpaa_eth_dev_configure(struct rte_eth_dev *dev __rte_unused) { + struct dpaa_if *dpaa_intf = dev->data->dev_private; + PMD_INIT_FUNC_TRACE(); if (dev->data->dev_conf.rxmode.jumbo_frame == 1) { if (dev->data->dev_conf.rxmode.max_rx_pkt_len <= - DPAA_MAX_RX_PKT_LEN) - return dpaa_mtu_set(dev, + DPAA_MAX_RX_PKT_LEN) { + fman_if_set_maxfrm(dpaa_intf->fif, dev->data->dev_conf.rxmode.max_rx_pkt_len); - else + return 0; + } else { return -1; + } } return 0; } @@ -212,19 +221,17 @@ dpaa_fw_version_get(struct rte_eth_dev *dev __rte_unused, DPAA_PMD_ERR("Unable to open SoC device"); return -ENOTSUP; /* Not supported on this infra */ } - - ret = fscanf(svr_file, "svr:%x", &svr_ver); - if (ret <= 0) { + if (fscanf(svr_file, "svr:%x", &svr_ver) > 0) + dpaa_svr_family = svr_ver & SVR_MASK; + else DPAA_PMD_ERR("Unable to read SoC device"); - return -ENOTSUP; /* Not supported on this infra */ - } - ret = snprintf(fw_version, fw_size, - "svr:%x-fman-v%x", - svr_ver, - fman_ip_rev); + fclose(svr_file); + ret = snprintf(fw_version, fw_size, "SVR:%x-fman-v%x", + svr_ver, fman_ip_rev); ret += 1; /* add the size of '\0' */ + if (fw_size < (uint32_t)ret) return ret; else @@ -443,12 +450,16 @@ static void dpaa_eth_multicast_disable(struct rte_eth_dev *dev) static int dpaa_eth_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx, - uint16_t nb_desc __rte_unused, + uint16_t nb_desc, unsigned int socket_id __rte_unused, const struct rte_eth_rxconf *rx_conf __rte_unused, struct rte_mempool *mp) { struct dpaa_if *dpaa_intf = dev->data->dev_private; + struct qman_fq *rxq = &dpaa_intf->rx_queues[queue_idx]; + struct qm_mcc_initfq opts = {0};
+ u32 flags = 0; + int ret; PMD_INIT_FUNC_TRACE(); @@ -484,7 +495,153 @@ int dpaa_eth_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx, dpaa_intf->name, fd_offset, fman_if_get_fdoff(dpaa_intf->fif)); } - dev->data->rx_queues[queue_idx] = &dpaa_intf->rx_queues[queue_idx]; + /* Check whether this queue can use push mode; no error check for now */ + if (dpaa_push_mode_max_queue > dpaa_push_queue_idx) { + dpaa_push_queue_idx++; + opts.we_mask = QM_INITFQ_WE_FQCTRL | QM_INITFQ_WE_CONTEXTA; + opts.fqd.fq_ctrl = QM_FQCTRL_AVOIDBLOCK | + QM_FQCTRL_CTXASTASHING | + QM_FQCTRL_PREFERINCACHE; + opts.fqd.context_a.stashing.exclusive = 0; + /* In a multicore scenario stashing becomes a bottleneck on LS1046, + * so do not enable stashing in this case. + */ + if (dpaa_svr_family != SVR_LS1046A_FAMILY) + opts.fqd.context_a.stashing.annotation_cl = + DPAA_IF_RX_ANNOTATION_STASH; + opts.fqd.context_a.stashing.data_cl = DPAA_IF_RX_DATA_STASH; + opts.fqd.context_a.stashing.context_cl = + DPAA_IF_RX_CONTEXT_STASH; + + /* Create a channel and associate the given queue with the channel */ + qman_alloc_pool_range((u32 *)&rxq->ch_id, 1, 1, 0); + opts.we_mask = opts.we_mask | QM_INITFQ_WE_DESTWQ; + opts.fqd.dest.channel = rxq->ch_id; + opts.fqd.dest.wq = DPAA_IF_RX_PRIORITY; + flags = QMAN_INITFQ_FLAG_SCHED; + + /* Configure tail drop */ + if (dpaa_intf->cgr_rx) { + opts.we_mask |= QM_INITFQ_WE_CGID; + opts.fqd.cgid = dpaa_intf->cgr_rx[queue_idx].cgrid; + opts.fqd.fq_ctrl |= QM_FQCTRL_CGE; + } + ret = qman_init_fq(rxq, flags, &opts); + if (ret) + DPAA_PMD_ERR("Channel/Queue association failed. fqid %d" + " ret: %d", rxq->fqid, ret); + rxq->cb.dqrr_dpdk_pull_cb = dpaa_rx_cb; + rxq->cb.dqrr_prepare = dpaa_rx_cb_prepare; + rxq->is_static = true; + } + dev->data->rx_queues[queue_idx] = rxq; + + /* configure the CGR size as per the desc size */ + if (dpaa_intf->cgr_rx) { + struct qm_mcc_initcgr cgr_opts = {0}; + + /* Enable tail drop with cgr on this queue */ + qm_cgr_cs_thres_set64(&cgr_opts.cgr.cs_thres, nb_desc, 0); + ret = qman_modify_cgr(dpaa_intf->cgr_rx, 0, &cgr_opts); + if (ret) { + DPAA_PMD_WARN( + "rx taildrop modify fail on fqid %d (ret=%d)", + rxq->fqid, ret); + } + } + + return 0; +} + +int __rte_experimental +dpaa_eth_eventq_attach(const struct rte_eth_dev *dev, + int eth_rx_queue_id, + u16 ch_id, + const struct rte_event_eth_rx_adapter_queue_conf *queue_conf) +{ + int ret; + u32 flags = 0; + struct dpaa_if *dpaa_intf = dev->data->dev_private; + struct qman_fq *rxq = &dpaa_intf->rx_queues[eth_rx_queue_id]; + struct qm_mcc_initfq opts = {0}; + + if (dpaa_push_mode_max_queue) + DPAA_PMD_WARN("PUSH mode already enabled for first %d queues.\n" + "To disable set DPAA_PUSH_QUEUES_NUMBER to 0\n", + dpaa_push_mode_max_queue); + + dpaa_poll_queue_default_config(&opts); + + switch (queue_conf->ev.sched_type) { + case RTE_SCHED_TYPE_ATOMIC: + opts.fqd.fq_ctrl |= QM_FQCTRL_HOLDACTIVE; + /* Reset the FQCTRL_AVOIDBLOCK bit as it is an unnecessary + * configuration with the HOLD_ACTIVE setting. + */ + opts.fqd.fq_ctrl &= (~QM_FQCTRL_AVOIDBLOCK); + rxq->cb.dqrr_dpdk_cb = dpaa_rx_cb_atomic; + break; + case RTE_SCHED_TYPE_ORDERED: + DPAA_PMD_ERR("Ordered queue schedule type is not supported\n"); + return -1; + default: + opts.fqd.fq_ctrl |= QM_FQCTRL_AVOIDBLOCK; + rxq->cb.dqrr_dpdk_cb = dpaa_rx_cb_parallel; + break; + } + + opts.we_mask = opts.we_mask | QM_INITFQ_WE_DESTWQ; + opts.fqd.dest.channel = ch_id; + opts.fqd.dest.wq = queue_conf->ev.priority; + + if (dpaa_intf->cgr_rx) { + opts.we_mask |= QM_INITFQ_WE_CGID; + opts.fqd.cgid =
dpaa_intf->cgr_rx[eth_rx_queue_id].cgrid; + opts.fqd.fq_ctrl |= QM_FQCTRL_CGE; + } + + flags = QMAN_INITFQ_FLAG_SCHED; + + ret = qman_init_fq(rxq, flags, &opts); + if (ret) { + DPAA_PMD_ERR("Channel/Queue association failed. fqid %d ret:%d", + rxq->fqid, ret); + return ret; + } + + /* copy configuration which needs to be filled during dequeue */ + memcpy(&rxq->ev, &queue_conf->ev, sizeof(struct rte_event)); + dev->data->rx_queues[eth_rx_queue_id] = rxq; + + return ret; +} + +int __rte_experimental +dpaa_eth_eventq_detach(const struct rte_eth_dev *dev, + int eth_rx_queue_id) +{ + struct qm_mcc_initfq opts; + int ret; + u32 flags = 0; + struct dpaa_if *dpaa_intf = dev->data->dev_private; + struct qman_fq *rxq = &dpaa_intf->rx_queues[eth_rx_queue_id]; + + dpaa_poll_queue_default_config(&opts); + + if (dpaa_intf->cgr_rx) { + opts.we_mask |= QM_INITFQ_WE_CGID; + opts.fqd.cgid = dpaa_intf->cgr_rx[eth_rx_queue_id].cgrid; + opts.fqd.fq_ctrl |= QM_FQCTRL_CGE; + } + + ret = qman_init_fq(rxq, flags, &opts); + if (ret) { + DPAA_PMD_ERR("init rx fqid %d failed with ret: %d", + rxq->fqid, ret); + } + + rxq->cb.dqrr_dpdk_cb = NULL; + dev->data->rx_queues[eth_rx_queue_id] = NULL; return 0; } @@ -515,6 +672,22 @@ static void dpaa_eth_tx_queue_release(void *txq __rte_unused) PMD_INIT_FUNC_TRACE(); } +static uint32_t +dpaa_dev_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id) +{ + struct dpaa_if *dpaa_intf = dev->data->dev_private; + struct qman_fq *rxq = &dpaa_intf->rx_queues[rx_queue_id]; + u32 frm_cnt = 0; + + PMD_INIT_FUNC_TRACE(); + + if (qman_query_fq_frm_cnt(rxq, &frm_cnt) == 0) { + RTE_LOG(DEBUG, PMD, "RX frame count for q(%d) is %u\n", + rx_queue_id, frm_cnt); + } + return frm_cnt; +} + static int dpaa_link_down(struct rte_eth_dev *dev) { PMD_INIT_FUNC_TRACE(); @@ -666,6 +839,7 @@ static struct eth_dev_ops dpaa_devops = { .tx_queue_setup = dpaa_eth_tx_queue_setup, .rx_queue_release = dpaa_eth_rx_queue_release, .tx_queue_release = dpaa_eth_tx_queue_release, + .rx_queue_count = dpaa_dev_rx_queue_count, .flow_ctrl_get = dpaa_flow_ctrl_get, .flow_ctrl_set = dpaa_flow_ctrl_set, @@ -692,6 +866,45 @@ static struct eth_dev_ops dpaa_devops = { .fw_version_get = dpaa_fw_version_get, }; +static bool +is_device_supported(struct rte_eth_dev *dev, struct rte_dpaa_driver *drv) +{ + if (strcmp(dev->device->driver->name, + drv->driver.name)) + return false; + + return true; +} + +static bool +is_dpaa_supported(struct rte_eth_dev *dev) +{ + return is_device_supported(dev, &rte_dpaa_pmd); +} + +int __rte_experimental +rte_pmd_dpaa_set_tx_loopback(uint8_t port, uint8_t on) +{ + struct rte_eth_dev *dev; + struct dpaa_if *dpaa_intf; + + RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV); + + dev = &rte_eth_devices[port]; + + if (!is_dpaa_supported(dev)) + return -ENOTSUP; + + dpaa_intf = dev->data->dev_private; + + if (on) + fman_if_loopback_enable(dpaa_intf->fif); + else + fman_if_loopback_disable(dpaa_intf->fif); + + return 0; +} + static int dpaa_fc_set_default(struct dpaa_if *dpaa_intf) { struct rte_eth_fc_conf *fc_conf; @@ -720,11 +933,21 @@ static int dpaa_fc_set_default(struct dpaa_if *dpaa_intf) } /* Initialise an Rx FQ */ -static int dpaa_rx_queue_init(struct qman_fq *fq, +static int dpaa_rx_queue_init(struct qman_fq *fq, struct qman_cgr *cgr_rx, uint32_t fqid) { - struct qm_mcc_initfq opts; + struct qm_mcc_initfq opts = {0}; int ret; + u32 flags = 0; + struct qm_mcc_initcgr cgr_opts = { + .we_mask = QM_CGR_WE_CS_THRES | + QM_CGR_WE_CSTD_EN | + QM_CGR_WE_MODE, + .cgr = { + .cstd_en = QM_CGR_EN, + .mode = 
QMAN_CGR_MODE_FRAME + } + }; PMD_INIT_FUNC_TRACE(); @@ -742,24 +965,28 @@ static int dpaa_rx_queue_init(struct qman_fq *fq, fqid, ret); return ret; } + fq->is_static = false; - opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_FQCTRL | - QM_INITFQ_WE_CONTEXTA; - - opts.fqd.dest.wq = DPAA_IF_RX_PRIORITY; - opts.fqd.fq_ctrl = QM_FQCTRL_AVOIDBLOCK | QM_FQCTRL_CTXASTASHING | - QM_FQCTRL_PREFERINCACHE; - opts.fqd.context_a.stashing.exclusive = 0; - opts.fqd.context_a.stashing.annotation_cl = DPAA_IF_RX_ANNOTATION_STASH; - opts.fqd.context_a.stashing.data_cl = DPAA_IF_RX_DATA_STASH; - opts.fqd.context_a.stashing.context_cl = DPAA_IF_RX_CONTEXT_STASH; + dpaa_poll_queue_default_config(&opts); - /*Enable tail drop */ - opts.we_mask = opts.we_mask | QM_INITFQ_WE_TDTHRESH; - opts.fqd.fq_ctrl = opts.fqd.fq_ctrl | QM_FQCTRL_TDE; - qm_fqd_taildrop_set(&opts.fqd.td, CONG_THRESHOLD_RX_Q, 1); - - ret = qman_init_fq(fq, 0, &opts); + if (cgr_rx) { + /* Enable tail drop with cgr on this queue */ + qm_cgr_cs_thres_set64(&cgr_opts.cgr.cs_thres, td_threshold, 0); + cgr_rx->cb = NULL; + ret = qman_create_cgr(cgr_rx, QMAN_CGR_FLAG_USE_INIT, + &cgr_opts); + if (ret) { + DPAA_PMD_WARN( + "rx taildrop init fail on rx fqid %d (ret=%d)", + fqid, ret); + goto without_cgr; + } + opts.we_mask |= QM_INITFQ_WE_CGID; + opts.fqd.cgid = cgr_rx->cgrid; + opts.fqd.fq_ctrl |= QM_FQCTRL_CGE; + } +without_cgr: + ret = qman_init_fq(fq, flags, &opts); if (ret) DPAA_PMD_ERR("init rx fqid %d failed with ret: %d", fqid, ret); return ret; @@ -769,7 +996,7 @@ static int dpaa_rx_queue_init(struct qman_fq *fq, static int dpaa_tx_queue_init(struct qman_fq *fq, struct fman_if *fman_intf) { - struct qm_mcc_initfq opts; + struct qm_mcc_initfq opts = {0}; int ret; PMD_INIT_FUNC_TRACE(); @@ -800,7 +1027,7 @@ static int dpaa_tx_queue_init(struct qman_fq *fq, /* Initialise a DEBUG FQ ([rt]x_error, rx_default). */ static int dpaa_debug_queue_init(struct qman_fq *fq, uint32_t fqid) { - struct qm_mcc_initfq opts; + struct qm_mcc_initfq opts = {0}; int ret; PMD_INIT_FUNC_TRACE(); @@ -841,6 +1068,7 @@ dpaa_dev_init(struct rte_eth_dev *eth_dev) struct fm_eth_port_cfg *cfg; struct fman_if *fman_intf; struct fman_if_bpool *bp, *tmp_bp; + uint32_t cgrid[DPAA_MAX_NUM_PCD_QUEUES]; PMD_INIT_FUNC_TRACE(); @@ -867,6 +1095,16 @@ dpaa_dev_init(struct rte_eth_dev *eth_dev) else num_rx_fqs = DPAA_DEFAULT_NUM_PCD_QUEUES; + /* If push mode queues are to be enabled. Currently we allow only + * one queue per thread. + */ + if (getenv("DPAA_PUSH_QUEUES_NUMBER")) { + dpaa_push_mode_max_queue = + atoi(getenv("DPAA_PUSH_QUEUES_NUMBER")); + if (dpaa_push_mode_max_queue > DPAA_MAX_PUSH_MODE_QUEUE) + dpaa_push_mode_max_queue = DPAA_MAX_PUSH_MODE_QUEUE; + } + /* Each device can not have more than DPAA_PCD_FQID_MULTIPLIER RX * queues.
*/ @@ -877,28 +1115,62 @@ dpaa_dev_init(struct rte_eth_dev *eth_dev) dpaa_intf->rx_queues = rte_zmalloc(NULL, sizeof(struct qman_fq) * num_rx_fqs, MAX_CACHELINE); + if (!dpaa_intf->rx_queues) { + DPAA_PMD_ERR("Failed to alloc mem for RX queues\n"); + return -ENOMEM; + } + + /* If congestion control is enabled globally */ + if (td_threshold) { + dpaa_intf->cgr_rx = rte_zmalloc(NULL, + sizeof(struct qman_cgr) * num_rx_fqs, MAX_CACHELINE); + if (!dpaa_intf->cgr_rx) { + DPAA_PMD_ERR("Failed to alloc mem for cgr_rx\n"); + ret = -ENOMEM; + goto free_rx; + } + + ret = qman_alloc_cgrid_range(&cgrid[0], num_rx_fqs, 1, 0); + if (ret != num_rx_fqs) { + DPAA_PMD_WARN("insufficient CGRIDs available"); + ret = -EINVAL; + goto free_rx; + } + } else { + dpaa_intf->cgr_rx = NULL; + } + for (loop = 0; loop < num_rx_fqs; loop++) { fqid = DPAA_PCD_FQID_START + dpaa_intf->ifid * DPAA_PCD_FQID_MULTIPLIER + loop; - ret = dpaa_rx_queue_init(&dpaa_intf->rx_queues[loop], fqid); + + if (dpaa_intf->cgr_rx) + dpaa_intf->cgr_rx[loop].cgrid = cgrid[loop]; + + ret = dpaa_rx_queue_init(&dpaa_intf->rx_queues[loop], + dpaa_intf->cgr_rx ? &dpaa_intf->cgr_rx[loop] : NULL, + fqid); if (ret) - return ret; + goto free_rx; dpaa_intf->rx_queues[loop].dpaa_intf = dpaa_intf; } dpaa_intf->nb_rx_queues = num_rx_fqs; /* Initialise Tx FQs. Have as many Tx FQ's as number of cores */ num_cores = rte_lcore_count(); dpaa_intf->tx_queues = rte_zmalloc(NULL, sizeof(struct qman_fq) * num_cores, MAX_CACHELINE); - if (!dpaa_intf->tx_queues) - return -ENOMEM; + if (!dpaa_intf->tx_queues) { + DPAA_PMD_ERR("Failed to alloc mem for TX queues\n"); + ret = -ENOMEM; + goto free_rx; + } for (loop = 0; loop < num_cores; loop++) { ret = dpaa_tx_queue_init(&dpaa_intf->tx_queues[loop], fman_intf); if (ret) - return ret; + goto free_tx; dpaa_intf->tx_queues[loop].dpaa_intf = dpaa_intf; } dpaa_intf->nb_tx_queues = num_cores; @@ -935,13 +1207,8 @@ dpaa_dev_init(struct rte_eth_dev *eth_dev) DPAA_PMD_ERR("Failed to allocate %d bytes needed to " "store MAC addresses", ETHER_ADDR_LEN * DPAA_MAX_MAC_FILTER); - rte_free(dpaa_intf->rx_queues); - rte_free(dpaa_intf->tx_queues); - dpaa_intf->rx_queues = NULL; - dpaa_intf->tx_queues = NULL; - dpaa_intf->nb_rx_queues = 0; - dpaa_intf->nb_tx_queues = 0; - return -ENOMEM; + ret = -ENOMEM; + goto free_tx; } /* copy the primary mac address */ @@ -967,12 +1234,25 @@ dpaa_dev_init(struct rte_eth_dev *eth_dev) fman_if_stats_reset(fman_intf); return 0; + +free_tx: + rte_free(dpaa_intf->tx_queues); + dpaa_intf->tx_queues = NULL; + dpaa_intf->nb_tx_queues = 0; + +free_rx: + rte_free(dpaa_intf->cgr_rx); + rte_free(dpaa_intf->rx_queues); + dpaa_intf->rx_queues = NULL; + dpaa_intf->nb_rx_queues = 0; + return ret; } static int dpaa_dev_uninit(struct rte_eth_dev *dev) { struct dpaa_if *dpaa_intf = dev->data->dev_private; + int loop; PMD_INIT_FUNC_TRACE(); @@ -990,6 +1270,18 @@ dpaa_dev_uninit(struct rte_eth_dev *dev) if (dpaa_intf->fc_conf) rte_free(dpaa_intf->fc_conf); + /* Release RX congestion Groups */ + if (dpaa_intf->cgr_rx) { + for (loop = 0; loop < dpaa_intf->nb_rx_queues; loop++) + qman_delete_cgr(&dpaa_intf->cgr_rx[loop]); + + qman_release_cgrid_range(dpaa_intf->cgr_rx[loop].cgrid, + dpaa_intf->nb_rx_queues); + } + + rte_free(dpaa_intf->cgr_rx); + dpaa_intf->cgr_rx = NULL; + rte_free(dpaa_intf->rx_queues); dpaa_intf->rx_queues = NULL; @@ -1046,10 +1338,12 @@ rte_dpaa_probe(struct rte_dpaa_driver *dpaa_drv, is_global_init = 1; } - ret =
rte_dpaa_portal_init((void *)1); - if (ret) { - DPAA_PMD_ERR("Unable to initialize portal"); - return ret; + if (unlikely(!RTE_PER_LCORE(dpaa_io))) { + ret = rte_dpaa_portal_init((void *)1); + if (ret) { + DPAA_PMD_ERR("Unable to initialize portal"); + return ret; + } } eth_dev = rte_eth_dev_allocate(dpaa_dev->name); diff --git a/drivers/net/dpaa/dpaa_ethdev.h b/drivers/net/dpaa/dpaa_ethdev.h index 5457d61b..c051ae32 100644 --- a/drivers/net/dpaa/dpaa_ethdev.h +++ b/drivers/net/dpaa/dpaa_ethdev.h @@ -1,41 +1,16 @@ -/*- - * BSD LICENSE +/* SPDX-License-Identifier: BSD-3-Clause * * Copyright (c) 2014-2016 Freescale Semiconductor, Inc. All rights reserved. - * Copyright 2017 NXP. + * Copyright 2017 NXP * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Freescale Semiconductor, Inc nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #ifndef __DPAA_ETHDEV_H__ #define __DPAA_ETHDEV_H__ /* System headers */ #include -#include +#include +#include #include #include @@ -43,11 +18,6 @@ #include #include -/* DPAA SoC identifier; If this is not available, it can be concluded - * that board is non-DPAA. Single slot is currently supported. - */ -#define DPAA_SOC_ID_FILE "sys/devices/soc0/soc_id" - #define DPAA_MBUF_HW_ANNOTATION 64 #define DPAA_FD_PTA_SIZE 64 @@ -55,6 +25,13 @@ #error "Annotation requirement is more than RTE_PKTMBUF_HEADROOM" #endif +/* mbuf->seqn will be used to store event entry index for + * driver specific usage. For parallel mode queues, invalid + * index will be set and for atomic mode queues, valid value + * ranging from 1 to 16. + */ +#define DPAA_INVALID_MBUF_SEQN 0 + /* we will re-use the HEADROOM for annotation in RX */ #define DPAA_HW_BUF_RESERVE 0 #define DPAA_PACKET_LAYOUT_ALIGN 64 @@ -65,24 +42,27 @@ #define DPAA_MIN_RX_BUF_SIZE 512 #define DPAA_MAX_RX_PKT_LEN 10240 -/* RX queue tail drop threshold - * currently considering 32 KB packets. 
- */ -#define CONG_THRESHOLD_RX_Q (32 * 1024) +/* RX queue tail drop threshold (CGR Based) in frame count */ +#define CGR_RX_PERFQ_THRESH 256 /*max mac filter for memac(8) including primary mac addr*/ #define DPAA_MAX_MAC_FILTER (MEMAC_NUM_OF_PADDRS + 1) /*Maximum number of slots available in TX ring*/ -#define MAX_TX_RING_SLOTS 8 +#define DPAA_TX_BURST_SIZE 7 + +#ifndef VLAN_TAG_SIZE +#define VLAN_TAG_SIZE 4 /** < Vlan Header Length */ +#endif /* PCD frame queues */ #define DPAA_PCD_FQID_START 0x400 #define DPAA_PCD_FQID_MULTIPLIER 0x100 #define DPAA_DEFAULT_NUM_PCD_QUEUES 1 +#define DPAA_MAX_NUM_PCD_QUEUES 32 #define DPAA_IF_TX_PRIORITY 3 -#define DPAA_IF_RX_PRIORITY 4 +#define DPAA_IF_RX_PRIORITY 0 #define DPAA_IF_DEBUG_PRIORITY 7 #define DPAA_IF_RX_ANNOTATION_STASH 1 @@ -129,6 +109,7 @@ struct dpaa_if { char *name; const struct fm_eth_port_cfg *cfg; struct qman_fq *rx_queues; + struct qman_cgr *cgr_rx; struct qman_fq *tx_queues; struct qman_fq debug_queues[2]; uint16_t nb_rx_queues; @@ -179,4 +160,25 @@ struct dpaa_if_stats { uint64_t tund; /** #include #include -#include +#include #include #include #include #include #include +#include +#include #include "dpaa_ethdev.h" #include "dpaa_rxtx.h" #include #include +#include #include #include #include @@ -122,12 +99,6 @@ static inline void dpaa_eth_packet_info(struct rte_mbuf *m, DPAA_DP_LOG(DEBUG, " Parsing mbuf: %p with annotations: %p", m, annot); switch (prs) { - case DPAA_PKT_TYPE_NONE: - m->packet_type = 0; - break; - case DPAA_PKT_TYPE_ETHER: - m->packet_type = RTE_PTYPE_L2_ETHER; - break; case DPAA_PKT_TYPE_IPV4: m->packet_type = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4; @@ -136,6 +107,9 @@ static inline void dpaa_eth_packet_info(struct rte_mbuf *m, m->packet_type = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6; break; + case DPAA_PKT_TYPE_ETHER: + m->packet_type = RTE_PTYPE_L2_ETHER; + break; case DPAA_PKT_TYPE_IPV4_FRAG: case DPAA_PKT_TYPE_IPV4_FRAG_UDP: case DPAA_PKT_TYPE_IPV4_FRAG_TCP: @@ -198,6 +172,9 @@ static inline void dpaa_eth_packet_info(struct rte_mbuf *m, m->packet_type = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_SCTP; break; + case DPAA_PKT_TYPE_NONE: + m->packet_type = 0; + break; /* More switch cases can be added */ default: dpaa_slow_parsing(m, prs); @@ -208,12 +185,11 @@ static inline void dpaa_eth_packet_info(struct rte_mbuf *m, << DPAA_PKT_L3_LEN_SHIFT; /* Set the hash values */ - m->hash.rss = (uint32_t)(rte_be_to_cpu_64(annot->hash)); - m->ol_flags = PKT_RX_RSS_HASH; + m->hash.rss = (uint32_t)(annot->hash); /* All packets with Bad checksum are dropped by interface (and * corresponding notification issued to RX error queues). */ - m->ol_flags |= PKT_RX_IP_CKSUM_GOOD; + m->ol_flags = PKT_RX_RSS_HASH | PKT_RX_IP_CKSUM_GOOD; /* Check if Vlan is present */ if (prs & DPAA_PARSE_VLAN_MASK) @@ -297,8 +273,32 @@ static inline void dpaa_checksum_offload(struct rte_mbuf *mbuf, fd->cmd = DPAA_FD_CMD_RPD | DPAA_FD_CMD_DTC; } +static inline void +dpaa_unsegmented_checksum(struct rte_mbuf *mbuf, struct qm_fd *fd_arr) +{ + if (!mbuf->packet_type) { + struct rte_net_hdr_lens hdr_lens; + + mbuf->packet_type = rte_net_get_ptype(mbuf, &hdr_lens, + RTE_PTYPE_L2_MASK | RTE_PTYPE_L3_MASK + | RTE_PTYPE_L4_MASK); + mbuf->l2_len = hdr_lens.l2_len; + mbuf->l3_len = hdr_lens.l3_len; + } + if (mbuf->data_off < (DEFAULT_TX_ICEOF + + sizeof(struct dpaa_eth_parse_results_t))) { + DPAA_DP_LOG(DEBUG, "Checksum offload Err: " + "Not enough Headroom " + "space for correct Checksum offload." 
+ "So Calculating checksum in Software."); + dpaa_checksum(mbuf); + } else { + dpaa_checksum_offload(mbuf, fd_arr, mbuf->buf_addr); + } +} + struct rte_mbuf * -dpaa_eth_sg_to_mbuf(struct qm_fd *fd, uint32_t ifid) +dpaa_eth_sg_to_mbuf(const struct qm_fd *fd, uint32_t ifid) { struct dpaa_bp_info *bp_info = DPAA_BPID_TO_POOL_INFO(fd->bpid); struct rte_mbuf *first_seg, *prev_seg, *cur_seg, *temp; @@ -309,7 +309,7 @@ dpaa_eth_sg_to_mbuf(struct qm_fd *fd, uint32_t ifid) DPAA_DP_LOG(DEBUG, "Received an SG frame"); - vaddr = rte_dpaa_mem_ptov(qm_fd_addr(fd)); + vaddr = DPAA_MEMPOOL_PTOV(bp_info, qm_fd_addr(fd)); if (!vaddr) { DPAA_PMD_ERR("unable to convert physical address"); return NULL; @@ -318,7 +318,7 @@ dpaa_eth_sg_to_mbuf(struct qm_fd *fd, uint32_t ifid) sg_temp = &sgt[i++]; hw_sg_to_cpu(sg_temp); temp = (struct rte_mbuf *)((char *)vaddr - bp_info->meta_data_size); - sg_vaddr = rte_dpaa_mem_ptov(qm_sg_entry_get64(sg_temp)); + sg_vaddr = DPAA_MEMPOOL_PTOV(bp_info, qm_sg_entry_get64(sg_temp)); first_seg = (struct rte_mbuf *)((char *)sg_vaddr - bp_info->meta_data_size); @@ -334,7 +334,8 @@ dpaa_eth_sg_to_mbuf(struct qm_fd *fd, uint32_t ifid) while (i < DPAA_SGT_MAX_ENTRIES) { sg_temp = &sgt[i++]; hw_sg_to_cpu(sg_temp); - sg_vaddr = rte_dpaa_mem_ptov(qm_sg_entry_get64(sg_temp)); + sg_vaddr = DPAA_MEMPOOL_PTOV(bp_info, + qm_sg_entry_get64(sg_temp)); cur_seg = (struct rte_mbuf *)((char *)sg_vaddr - bp_info->meta_data_size); cur_seg->data_off = sg_temp->offset; @@ -356,34 +357,33 @@ dpaa_eth_sg_to_mbuf(struct qm_fd *fd, uint32_t ifid) return first_seg; } -static inline struct rte_mbuf *dpaa_eth_fd_to_mbuf(struct qm_fd *fd, - uint32_t ifid) +static inline struct rte_mbuf * +dpaa_eth_fd_to_mbuf(const struct qm_fd *fd, uint32_t ifid) { - struct dpaa_bp_info *bp_info = DPAA_BPID_TO_POOL_INFO(fd->bpid); struct rte_mbuf *mbuf; + struct dpaa_bp_info *bp_info = DPAA_BPID_TO_POOL_INFO(fd->bpid); void *ptr; uint8_t format = (fd->opaque & DPAA_FD_FORMAT_MASK) >> DPAA_FD_FORMAT_SHIFT; - uint16_t offset = - (fd->opaque & DPAA_FD_OFFSET_MASK) >> DPAA_FD_OFFSET_SHIFT; - uint32_t length = fd->opaque & DPAA_FD_LENGTH_MASK; + uint16_t offset; + uint32_t length; DPAA_DP_LOG(DEBUG, " FD--->MBUF"); if (unlikely(format == qm_fd_sg)) return dpaa_eth_sg_to_mbuf(fd, ifid); + ptr = DPAA_MEMPOOL_PTOV(bp_info, qm_fd_addr(fd)); + + rte_prefetch0((void *)((uint8_t *)ptr + DEFAULT_RX_ICEOF)); + + offset = (fd->opaque & DPAA_FD_OFFSET_MASK) >> DPAA_FD_OFFSET_SHIFT; + length = fd->opaque & DPAA_FD_LENGTH_MASK; + /* Ignoring case when format != qm_fd_contig */ dpaa_display_frame(fd); - ptr = rte_dpaa_mem_ptov(fd->addr); - /* Ignoring case when ptr would be NULL. 
That is only possible incase - * of a corrupted packet - */ mbuf = (struct rte_mbuf *)((char *)ptr - bp_info->meta_data_size); - /* Prefetch the Parse results and packet data to L1 */ - rte_prefetch0((void *)((uint8_t *)ptr + DEFAULT_RX_ICEOF)); - rte_prefetch0((void *)((uint8_t *)ptr + offset)); mbuf->data_off = offset; mbuf->data_len = length; @@ -399,6 +399,161 @@ static inline struct rte_mbuf *dpaa_eth_fd_to_mbuf(struct qm_fd *fd, return mbuf; } +void +dpaa_rx_cb(struct qman_fq **fq, struct qm_dqrr_entry **dqrr, + void **bufs, int num_bufs) +{ + struct rte_mbuf *mbuf; + struct dpaa_bp_info *bp_info; + const struct qm_fd *fd; + void *ptr; + struct dpaa_if *dpaa_intf; + uint16_t offset, i; + uint32_t length; + uint8_t format; + + if (dpaa_svr_family != SVR_LS1046A_FAMILY) { + bp_info = DPAA_BPID_TO_POOL_INFO(dqrr[0]->fd.bpid); + ptr = rte_dpaa_mem_ptov(qm_fd_addr(&dqrr[0]->fd)); + rte_prefetch0((void *)((uint8_t *)ptr + DEFAULT_RX_ICEOF)); + bufs[0] = (struct rte_mbuf *)((char *)ptr - + bp_info->meta_data_size); + } + + for (i = 0; i < num_bufs; i++) { + if (dpaa_svr_family != SVR_LS1046A_FAMILY && + i < num_bufs - 1) { + bp_info = DPAA_BPID_TO_POOL_INFO(dqrr[i + 1]->fd.bpid); + ptr = rte_dpaa_mem_ptov(qm_fd_addr(&dqrr[i + 1]->fd)); + rte_prefetch0((void *)((uint8_t *)ptr + + DEFAULT_RX_ICEOF)); + bufs[i + 1] = (struct rte_mbuf *)((char *)ptr - + bp_info->meta_data_size); + } + + fd = &dqrr[i]->fd; + dpaa_intf = fq[i]->dpaa_intf; + + format = (fd->opaque & DPAA_FD_FORMAT_MASK) >> + DPAA_FD_FORMAT_SHIFT; + if (unlikely(format == qm_fd_sg)) { + bufs[i] = dpaa_eth_sg_to_mbuf(fd, dpaa_intf->ifid); + continue; + } + + offset = (fd->opaque & DPAA_FD_OFFSET_MASK) >> + DPAA_FD_OFFSET_SHIFT; + length = fd->opaque & DPAA_FD_LENGTH_MASK; + + mbuf = bufs[i]; + mbuf->data_off = offset; + mbuf->data_len = length; + mbuf->pkt_len = length; + mbuf->port = dpaa_intf->ifid; + + mbuf->nb_segs = 1; + mbuf->ol_flags = 0; + mbuf->next = NULL; + rte_mbuf_refcnt_set(mbuf, 1); + dpaa_eth_packet_info(mbuf, (uint64_t)mbuf->buf_addr); + } +} + +void dpaa_rx_cb_prepare(struct qm_dqrr_entry *dq, void **bufs) +{ + struct dpaa_bp_info *bp_info = DPAA_BPID_TO_POOL_INFO(dq->fd.bpid); + void *ptr = rte_dpaa_mem_ptov(qm_fd_addr(&dq->fd)); + + /* In case of LS1046, annotation stashing is disabled due to the L2 + * cache being a bottleneck in multicore scenarios on this platform. + * So we prefetch the annotation beforehand, so that it is available + * in cache when accessed.
+ */ + if (dpaa_svr_family == SVR_LS1046A_FAMILY) + rte_prefetch0((void *)((uint8_t *)ptr + DEFAULT_RX_ICEOF)); + + *bufs = (struct rte_mbuf *)((char *)ptr - bp_info->meta_data_size); +} + +static uint16_t +dpaa_eth_queue_portal_rx(struct qman_fq *fq, + struct rte_mbuf **bufs, + uint16_t nb_bufs) +{ + int ret; + + if (unlikely(fq->qp == NULL)) { + ret = rte_dpaa_portal_fq_init((void *)0, fq); + if (ret) { + DPAA_PMD_ERR("Failure in affining portal %d", ret); + return 0; + } + } + + return qman_portal_poll_rx(nb_bufs, (void **)bufs, fq->qp); +} + +enum qman_cb_dqrr_result +dpaa_rx_cb_parallel(void *event, + struct qman_portal *qm __always_unused, + struct qman_fq *fq, + const struct qm_dqrr_entry *dqrr, + void **bufs) +{ + u32 ifid = ((struct dpaa_if *)fq->dpaa_intf)->ifid; + struct rte_mbuf *mbuf; + struct rte_event *ev = (struct rte_event *)event; + + mbuf = dpaa_eth_fd_to_mbuf(&dqrr->fd, ifid); + ev->event_ptr = (void *)mbuf; + ev->flow_id = fq->ev.flow_id; + ev->sub_event_type = fq->ev.sub_event_type; + ev->event_type = RTE_EVENT_TYPE_ETHDEV; + ev->op = RTE_EVENT_OP_NEW; + ev->sched_type = fq->ev.sched_type; + ev->queue_id = fq->ev.queue_id; + ev->priority = fq->ev.priority; + ev->impl_opaque = (uint8_t)DPAA_INVALID_MBUF_SEQN; + mbuf->seqn = DPAA_INVALID_MBUF_SEQN; + *bufs = mbuf; + + return qman_cb_dqrr_consume; +} + +enum qman_cb_dqrr_result +dpaa_rx_cb_atomic(void *event, + struct qman_portal *qm __always_unused, + struct qman_fq *fq, + const struct qm_dqrr_entry *dqrr, + void **bufs) +{ + u8 index; + u32 ifid = ((struct dpaa_if *)fq->dpaa_intf)->ifid; + struct rte_mbuf *mbuf; + struct rte_event *ev = (struct rte_event *)event; + + mbuf = dpaa_eth_fd_to_mbuf(&dqrr->fd, ifid); + ev->event_ptr = (void *)mbuf; + ev->flow_id = fq->ev.flow_id; + ev->sub_event_type = fq->ev.sub_event_type; + ev->event_type = RTE_EVENT_TYPE_ETHDEV; + ev->op = RTE_EVENT_OP_NEW; + ev->sched_type = fq->ev.sched_type; + ev->queue_id = fq->ev.queue_id; + ev->priority = fq->ev.priority; + + /* Save active dqrr entries */ + index = DQRR_PTR2IDX(dqrr); + DPAA_PER_LCORE_DQRR_SIZE++; + DPAA_PER_LCORE_DQRR_HELD |= 1 << index; + DPAA_PER_LCORE_DQRR_MBUF(index) = mbuf; + ev->impl_opaque = index + 1; + mbuf->seqn = (uint32_t)index + 1; + *bufs = mbuf; + + return qman_cb_dqrr_defer; +} + uint16_t dpaa_eth_queue_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs) @@ -408,10 +563,15 @@ uint16_t dpaa_eth_queue_rx(void *q, uint32_t num_rx = 0, ifid = ((struct dpaa_if *)fq->dpaa_intf)->ifid; int ret; - ret = rte_dpaa_portal_init((void *)0); - if (ret) { - DPAA_PMD_ERR("Failure in affining portal"); - return 0; + if (likely(fq->is_static)) + return dpaa_eth_queue_portal_rx(fq, bufs, nb_bufs); + + if (unlikely(!RTE_PER_LCORE(dpaa_io))) { + ret = rte_dpaa_portal_init((void *)0); + if (ret) { + DPAA_PMD_ERR("Failure in affining portal"); + return 0; + } } ret = qman_set_vdq(fq, (nb_bufs > DPAA_MAX_DEQUEUE_NUM_FRAMES) ? 
@@ -445,7 +605,8 @@ static void *dpaa_get_pktbuf(struct dpaa_bp_info *bp_info) DPAA_DP_LOG(DEBUG, "got buffer 0x%lx from pool %d", (uint64_t)bufs.addr, bufs.bpid); - buf = (uint64_t)rte_dpaa_mem_ptov(bufs.addr) - bp_info->meta_data_size; + buf = (uint64_t)DPAA_MEMPOOL_PTOV(bp_info, bufs.addr) + - bp_info->meta_data_size; if (!buf) goto out; @@ -463,11 +624,11 @@ static struct rte_mbuf *dpaa_get_dmable_mbuf(struct rte_mbuf *mbuf, if (!dpaa_mbuf) return NULL; - memcpy((uint8_t *)(dpaa_mbuf->buf_addr) + mbuf->data_off, (void *) + memcpy((uint8_t *)(dpaa_mbuf->buf_addr) + RTE_PKTMBUF_HEADROOM, (void *) ((uint8_t *)(mbuf->buf_addr) + mbuf->data_off), mbuf->pkt_len); /* Copy only the required fields */ - dpaa_mbuf->data_off = mbuf->data_off; + dpaa_mbuf->data_off = RTE_PKTMBUF_HEADROOM; dpaa_mbuf->pkt_len = mbuf->pkt_len; dpaa_mbuf->ol_flags = mbuf->ol_flags; dpaa_mbuf->packet_type = mbuf->packet_type; @@ -504,6 +665,15 @@ dpaa_eth_mbuf_to_sg_fd(struct rte_mbuf *mbuf, fd->opaque_addr = 0; if (mbuf->ol_flags & DPAA_TX_CKSUM_OFFLOAD_MASK) { + if (!mbuf->packet_type) { + struct rte_net_hdr_lens hdr_lens; + + mbuf->packet_type = rte_net_get_ptype(mbuf, &hdr_lens, + RTE_PTYPE_L2_MASK | RTE_PTYPE_L3_MASK + | RTE_PTYPE_L4_MASK); + mbuf->l2_len = hdr_lens.l2_len; + mbuf->l3_len = hdr_lens.l3_len; + } if (temp->data_off < DEFAULT_TX_ICEOF + sizeof(struct dpaa_eth_parse_results_t)) temp->data_off = DEFAULT_TX_ICEOF @@ -610,18 +780,8 @@ tx_on_dpaa_pool_unsegmented(struct rte_mbuf *mbuf, rte_pktmbuf_free(mbuf); } - if (mbuf->ol_flags & DPAA_TX_CKSUM_OFFLOAD_MASK) { - if (mbuf->data_off < (DEFAULT_TX_ICEOF + - sizeof(struct dpaa_eth_parse_results_t))) { - DPAA_DP_LOG(DEBUG, "Checksum offload Err: " - "Not enough Headroom " - "space for correct Checksum offload." - "So Calculating checksum in Software."); - dpaa_checksum(mbuf); - } else { - dpaa_checksum_offload(mbuf, fd_arr, mbuf->buf_addr); - } - } + if (mbuf->ol_flags & DPAA_TX_CKSUM_OFFLOAD_MASK) + dpaa_unsegmented_checksum(mbuf, fd_arr); } /* Handle all mbufs on dpaa BMAN managed pool */ @@ -665,7 +825,7 @@ tx_on_external_pool(struct qman_fq *txq, struct rte_mbuf *mbuf, return 1; } - DPAA_MBUF_TO_CONTIG_FD(mbuf, fd_arr, dpaa_intf->bp_info->bpid); + DPAA_MBUF_TO_CONTIG_FD(dmable_mbuf, fd_arr, dpaa_intf->bp_info->bpid); return 0; } @@ -676,25 +836,42 @@ dpaa_eth_queue_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs) struct rte_mbuf *mbuf, *mi = NULL; struct rte_mempool *mp; struct dpaa_bp_info *bp_info; - struct qm_fd fd_arr[MAX_TX_RING_SLOTS]; - uint32_t frames_to_send, loop, i = 0; + struct qm_fd fd_arr[DPAA_TX_BURST_SIZE]; + uint32_t frames_to_send, loop, sent = 0; uint16_t state; int ret; + uint32_t seqn, index, flags[DPAA_TX_BURST_SIZE] = {0}; - ret = rte_dpaa_portal_init((void *)0); - if (ret) { - DPAA_PMD_ERR("Failure in affining portal"); - return 0; + if (unlikely(!RTE_PER_LCORE(dpaa_io))) { + ret = rte_dpaa_portal_init((void *)0); + if (ret) { + DPAA_PMD_ERR("Failure in affining portal"); + return 0; + } } DPAA_DP_LOG(DEBUG, "Transmitting %d buffers on queue: %p", nb_bufs, q); while (nb_bufs) { - frames_to_send = (nb_bufs >> 3) ? MAX_TX_RING_SLOTS : nb_bufs; - for (loop = 0; loop < frames_to_send; loop++, i++) { - mbuf = bufs[i]; - if (RTE_MBUF_DIRECT(mbuf)) { + frames_to_send = (nb_bufs > DPAA_TX_BURST_SIZE) ? 
+ DPAA_TX_BURST_SIZE : nb_bufs; + for (loop = 0; loop < frames_to_send; loop++) { + mbuf = *(bufs++); + if (likely(RTE_MBUF_DIRECT(mbuf))) { mp = mbuf->pool; + bp_info = DPAA_MEMPOOL_TO_POOL_INFO(mp); + if (likely(mp->ops_index == + bp_info->dpaa_ops_index && + mbuf->nb_segs == 1 && + rte_mbuf_refcnt_read(mbuf) == 1)) { + DPAA_MBUF_TO_CONTIG_FD(mbuf, + &fd_arr[loop], bp_info->bpid); + if (mbuf->ol_flags & + DPAA_TX_CKSUM_OFFLOAD_MASK) + dpaa_unsegmented_checksum(mbuf, + &fd_arr[loop]); + continue; + } } else { mi = rte_mbuf_from_indirect(mbuf); mp = mi->pool; @@ -726,20 +903,34 @@ dpaa_eth_queue_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs) goto send_pkts; } } + seqn = mbuf->seqn; + if (seqn != DPAA_INVALID_MBUF_SEQN) { + index = seqn - 1; + if (DPAA_PER_LCORE_DQRR_HELD & (1 << index)) { + flags[loop] = + ((index & QM_EQCR_DCA_IDXMASK) << 8); + flags[loop] |= QMAN_ENQUEUE_FLAG_DCA; + DPAA_PER_LCORE_DQRR_SIZE--; + DPAA_PER_LCORE_DQRR_HELD &= + ~(1 << index); + } + } } send_pkts: loop = 0; while (loop < frames_to_send) { loop += qman_enqueue_multi(q, &fd_arr[loop], - frames_to_send - loop); + &flags[loop], + frames_to_send - loop); } nb_bufs -= frames_to_send; + sent += frames_to_send; } - DPAA_DP_LOG(DEBUG, "Transmitted %d buffers on queue: %p", i, q); + DPAA_DP_LOG(DEBUG, "Transmitted %d buffers on queue: %p", sent, q); - return i; + return sent; } uint16_t dpaa_eth_tx_drop_all(void *q __rte_unused, diff --git a/drivers/net/dpaa/dpaa_rxtx.h b/drivers/net/dpaa/dpaa_rxtx.h index 2ffc4ffe..d3e63516 100644 --- a/drivers/net/dpaa/dpaa_rxtx.h +++ b/drivers/net/dpaa/dpaa_rxtx.h @@ -1,34 +1,8 @@ -/*- - * BSD LICENSE +/* SPDX-License-Identifier: BSD-3-Clause * * Copyright 2016 Freescale Semiconductor, Inc. All rights reserved. - * Copyright 2017 NXP. + * Copyright 2017 NXP * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Freescale Semiconductor, Inc nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ #ifndef __DPDK_RXTX_H__ @@ -288,10 +262,14 @@ uint16_t dpaa_eth_tx_drop_all(void *q __rte_unused, struct rte_mbuf **bufs __rte_unused, uint16_t nb_bufs __rte_unused); -struct rte_mbuf *dpaa_eth_sg_to_mbuf(struct qm_fd *fd, uint32_t ifid); +struct rte_mbuf *dpaa_eth_sg_to_mbuf(const struct qm_fd *fd, uint32_t ifid); int dpaa_eth_mbuf_to_sg_fd(struct rte_mbuf *mbuf, struct qm_fd *fd, uint32_t bpid); +void dpaa_rx_cb(struct qman_fq **fq, + struct qm_dqrr_entry **dqrr, void **bufs, int num_bufs); + +void dpaa_rx_cb_prepare(struct qm_dqrr_entry *dq, void **bufs); #endif diff --git a/drivers/net/dpaa/rte_pmd_dpaa.h b/drivers/net/dpaa/rte_pmd_dpaa.h new file mode 100644 index 00000000..38405ec0 --- /dev/null +++ b/drivers/net/dpaa/rte_pmd_dpaa.h @@ -0,0 +1,39 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright 2018 NXP + */ + +#ifndef _PMD_DPAA_H_ +#define _PMD_DPAA_H_ + +/** + * @file rte_pmd_dpaa.h + * + * NXP dpaa PMD specific functions. + * + * @warning + * @b EXPERIMENTAL: this API may change, or be removed, without prior notice + * + */ + +#include + +/** + * @warning + * @b EXPERIMENTAL: this API may change, or be removed, without prior notice + * + * Enable/Disable TX loopback + * + * @param port + * The port identifier of the Ethernet device. + * @param on + * 1 - Enable TX loopback. + * 0 - Disable TX loopback. + * @return + * - (0) if successful. + * - (-ENODEV) if *port* invalid. + * - (-EINVAL) if bad parameter. + */ +int __rte_experimental +rte_pmd_dpaa_set_tx_loopback(uint8_t port, uint8_t on); + +#endif /* _PMD_DPAA_H_ */ diff --git a/drivers/net/dpaa/rte_pmd_dpaa_version.map b/drivers/net/dpaa/rte_pmd_dpaa_version.map index a70bd197..3b937b10 100644 --- a/drivers/net/dpaa/rte_pmd_dpaa_version.map +++ b/drivers/net/dpaa/rte_pmd_dpaa_version.map @@ -2,3 +2,13 @@ DPDK_17.11 { local: *; }; + +EXPERIMENTAL { + global: + + dpaa_eth_eventq_attach; + dpaa_eth_eventq_detach; + rte_pmd_dpaa_set_tx_loopback; + + local: *; +} DPDK_17.11;
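For reference, a minimal caller of the new rte_pmd_dpaa_set_tx_loopback() API exported above. This sketch is not part of the patch; it assumes rte_eal_init() has already run and that the given port is bound to the dpaa PMD.

#include <rte_ethdev.h>
#include <rte_pmd_dpaa.h>

/* Toggle TX loopback on a dpaa port around a test run.
 * rte_pmd_dpaa_set_tx_loopback() returns -ENODEV for an invalid port id
 * and -ENOTSUP when the port is not bound to the dpaa PMD.
 */
static int
dpaa_loopback_test(uint8_t port)
{
	int ret = rte_pmd_dpaa_set_tx_loopback(port, 1);

	if (ret < 0)
		return ret;

	/* ... send and receive test traffic on 'port' here ... */

	return rte_pmd_dpaa_set_tx_loopback(port, 0);
}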