Diffstat (limited to 'drivers/net/dpaa')
-rw-r--r--  drivers/net/dpaa/Makefile                    63
-rw-r--r--  drivers/net/dpaa/dpaa_ethdev.c             1109
-rw-r--r--  drivers/net/dpaa/dpaa_ethdev.h              182
-rw-r--r--  drivers/net/dpaa/dpaa_rxtx.c                756
-rw-r--r--  drivers/net/dpaa/dpaa_rxtx.h                297
-rw-r--r--  drivers/net/dpaa/rte_pmd_dpaa_version.map     4
6 files changed, 2411 insertions, 0 deletions
diff --git a/drivers/net/dpaa/Makefile b/drivers/net/dpaa/Makefile
new file mode 100644
index 00000000..171686ec
--- /dev/null
+++ b/drivers/net/dpaa/Makefile
@@ -0,0 +1,63 @@
+# BSD LICENSE
+#
+# Copyright 2017 NXP.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of NXP nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+include $(RTE_SDK)/mk/rte.vars.mk
+RTE_SDK_DPAA=$(RTE_SDK)/drivers/net/dpaa
+
+#
+# library name
+#
+LIB = librte_pmd_dpaa.a
+
+CFLAGS := -I$(SRCDIR) $(CFLAGS)
+CFLAGS += -O3 $(WERROR_FLAGS)
+CFLAGS += -Wno-pointer-arith
+CFLAGS += -I$(RTE_SDK_DPAA)/
+CFLAGS += -I$(RTE_SDK_DPAA)/include
+CFLAGS += -I$(RTE_SDK)/drivers/bus/dpaa
+CFLAGS += -I$(RTE_SDK)/drivers/bus/dpaa/include/
+CFLAGS += -I$(RTE_SDK)/drivers/mempool/dpaa
+CFLAGS += -I$(RTE_SDK)/lib/librte_eal/common/include
+CFLAGS += -I$(RTE_SDK)/lib/librte_eal/linuxapp/eal/include
+
+EXPORT_MAP := rte_pmd_dpaa_version.map
+
+LIBABIVER := 1
+
+# Interfaces with DPDK
+SRCS-$(CONFIG_RTE_LIBRTE_DPAA_PMD) += dpaa_ethdev.c
+SRCS-$(CONFIG_RTE_LIBRTE_DPAA_PMD) += dpaa_rxtx.c
+
+LDLIBS += -lrte_bus_dpaa
+LDLIBS += -lrte_mempool_dpaa
+LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring
+LDLIBS += -lrte_ethdev -lrte_net -lrte_kvargs
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/net/dpaa/dpaa_ethdev.c b/drivers/net/dpaa/dpaa_ethdev.c
new file mode 100644
index 00000000..cf5a2ecf
--- /dev/null
+++ b/drivers/net/dpaa/dpaa_ethdev.c
@@ -0,0 +1,1109 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright 2016 Freescale Semiconductor, Inc. All rights reserved.
+ * Copyright 2017 NXP.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Freescale Semiconductor, Inc nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+/* System headers */
+#include <stdio.h>
+#include <inttypes.h>
+#include <unistd.h>
+#include <limits.h>
+#include <sched.h>
+#include <signal.h>
+#include <pthread.h>
+#include <sys/types.h>
+#include <sys/syscall.h>
+
+#include <rte_byteorder.h>
+#include <rte_common.h>
+#include <rte_interrupts.h>
+#include <rte_log.h>
+#include <rte_debug.h>
+#include <rte_pci.h>
+#include <rte_atomic.h>
+#include <rte_branch_prediction.h>
+#include <rte_memory.h>
+#include <rte_tailq.h>
+#include <rte_eal.h>
+#include <rte_alarm.h>
+#include <rte_ether.h>
+#include <rte_ethdev.h>
+#include <rte_malloc.h>
+#include <rte_ring.h>
+
+#include <rte_dpaa_bus.h>
+#include <rte_dpaa_logs.h>
+#include <dpaa_mempool.h>
+
+#include <dpaa_ethdev.h>
+#include <dpaa_rxtx.h>
+
+#include <fsl_usd.h>
+#include <fsl_qman.h>
+#include <fsl_bman.h>
+#include <fsl_fman.h>
+
+/* Keep track of whether QMAN and BMAN have been globally initialized */
+static int is_global_init;
+
+struct rte_dpaa_xstats_name_off {
+ char name[RTE_ETH_XSTATS_NAME_SIZE];
+ uint32_t offset;
+};
+
+static const struct rte_dpaa_xstats_name_off dpaa_xstats_strings[] = {
+ {"rx_align_err",
+ offsetof(struct dpaa_if_stats, raln)},
+ {"rx_valid_pause",
+ offsetof(struct dpaa_if_stats, rxpf)},
+ {"rx_fcs_err",
+ offsetof(struct dpaa_if_stats, rfcs)},
+ {"rx_vlan_frame",
+ offsetof(struct dpaa_if_stats, rvlan)},
+ {"rx_frame_err",
+ offsetof(struct dpaa_if_stats, rerr)},
+ {"rx_drop_err",
+ offsetof(struct dpaa_if_stats, rdrp)},
+ {"rx_undersized",
+ offsetof(struct dpaa_if_stats, rund)},
+ {"rx_oversize_err",
+ offsetof(struct dpaa_if_stats, rovr)},
+ {"rx_fragment_pkt",
+ offsetof(struct dpaa_if_stats, rfrg)},
+ {"tx_valid_pause",
+ offsetof(struct dpaa_if_stats, txpf)},
+ {"tx_fcs_err",
+ offsetof(struct dpaa_if_stats, terr)},
+ {"tx_vlan_frame",
+ offsetof(struct dpaa_if_stats, tvlan)},
+ {"rx_undersized",
+ offsetof(struct dpaa_if_stats, tund)},
+};
+
+static int
+dpaa_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
+{
+ struct dpaa_if *dpaa_intf = dev->data->dev_private;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (mtu < ETHER_MIN_MTU)
+ return -EINVAL;
+ if (mtu > ETHER_MAX_LEN)
+ dev->data->dev_conf.rxmode.jumbo_frame = 1;
+ else
+ dev->data->dev_conf.rxmode.jumbo_frame = 0;
+
+ dev->data->dev_conf.rxmode.max_rx_pkt_len = mtu;
+
+ fman_if_set_maxfrm(dpaa_intf->fif, mtu);
+
+ return 0;
+}
+
+static int
+dpaa_eth_dev_configure(struct rte_eth_dev *dev __rte_unused)
+{
+ PMD_INIT_FUNC_TRACE();
+
+ if (dev->data->dev_conf.rxmode.jumbo_frame == 1) {
+ if (dev->data->dev_conf.rxmode.max_rx_pkt_len <=
+ DPAA_MAX_RX_PKT_LEN)
+ return dpaa_mtu_set(dev,
+ dev->data->dev_conf.rxmode.max_rx_pkt_len);
+ else
+ return -1;
+ }
+ return 0;
+}
+
+static const uint32_t *
+dpaa_supported_ptypes_get(struct rte_eth_dev *dev)
+{
+ static const uint32_t ptypes[] = {
+		/* todo: add more types */
+ RTE_PTYPE_L2_ETHER,
+ RTE_PTYPE_L3_IPV4,
+ RTE_PTYPE_L3_IPV4_EXT,
+ RTE_PTYPE_L3_IPV6,
+ RTE_PTYPE_L3_IPV6_EXT,
+ RTE_PTYPE_L4_TCP,
+ RTE_PTYPE_L4_UDP,
+ RTE_PTYPE_L4_SCTP
+ };
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (dev->rx_pkt_burst == dpaa_eth_queue_rx)
+ return ptypes;
+ return NULL;
+}
+
+static int dpaa_eth_dev_start(struct rte_eth_dev *dev)
+{
+ struct dpaa_if *dpaa_intf = dev->data->dev_private;
+
+ PMD_INIT_FUNC_TRACE();
+
+ /* Change tx callback to the real one */
+ dev->tx_pkt_burst = dpaa_eth_queue_tx;
+ fman_if_enable_rx(dpaa_intf->fif);
+
+ return 0;
+}
+
+static void dpaa_eth_dev_stop(struct rte_eth_dev *dev)
+{
+ struct dpaa_if *dpaa_intf = dev->data->dev_private;
+
+ PMD_INIT_FUNC_TRACE();
+
+ fman_if_disable_rx(dpaa_intf->fif);
+ dev->tx_pkt_burst = dpaa_eth_tx_drop_all;
+}
+
+static void dpaa_eth_dev_close(struct rte_eth_dev *dev)
+{
+ PMD_INIT_FUNC_TRACE();
+
+ dpaa_eth_dev_stop(dev);
+}
+
+static int
+dpaa_fw_version_get(struct rte_eth_dev *dev __rte_unused,
+ char *fw_version,
+ size_t fw_size)
+{
+ int ret;
+ FILE *svr_file = NULL;
+ unsigned int svr_ver = 0;
+
+ PMD_INIT_FUNC_TRACE();
+
+ svr_file = fopen(DPAA_SOC_ID_FILE, "r");
+ if (!svr_file) {
+ DPAA_PMD_ERR("Unable to open SoC device");
+ return -ENOTSUP; /* Not supported on this infra */
+ }
+
+	ret = fscanf(svr_file, "svr:%x", &svr_ver);
+	fclose(svr_file);
+	if (ret <= 0) {
+		DPAA_PMD_ERR("Unable to read SoC device");
+		return -ENOTSUP; /* Not supported on this infra */
+	}
+
+ ret = snprintf(fw_version, fw_size,
+ "svr:%x-fman-v%x",
+ svr_ver,
+ fman_ip_rev);
+
+ ret += 1; /* add the size of '\0' */
+ if (fw_size < (uint32_t)ret)
+ return ret;
+ else
+ return 0;
+}
+
+static void dpaa_eth_dev_info(struct rte_eth_dev *dev,
+ struct rte_eth_dev_info *dev_info)
+{
+ struct dpaa_if *dpaa_intf = dev->data->dev_private;
+
+ PMD_INIT_FUNC_TRACE();
+
+ dev_info->max_rx_queues = dpaa_intf->nb_rx_queues;
+ dev_info->max_tx_queues = dpaa_intf->nb_tx_queues;
+ dev_info->min_rx_bufsize = DPAA_MIN_RX_BUF_SIZE;
+ dev_info->max_rx_pktlen = DPAA_MAX_RX_PKT_LEN;
+ dev_info->max_mac_addrs = DPAA_MAX_MAC_FILTER;
+ dev_info->max_hash_mac_addrs = 0;
+ dev_info->max_vfs = 0;
+ dev_info->max_vmdq_pools = ETH_16_POOLS;
+ dev_info->flow_type_rss_offloads = DPAA_RSS_OFFLOAD_ALL;
+ dev_info->speed_capa = (ETH_LINK_SPEED_1G |
+ ETH_LINK_SPEED_10G);
+ dev_info->rx_offload_capa =
+ (DEV_RX_OFFLOAD_IPV4_CKSUM |
+ DEV_RX_OFFLOAD_UDP_CKSUM |
+ DEV_RX_OFFLOAD_TCP_CKSUM);
+ dev_info->tx_offload_capa =
+ (DEV_TX_OFFLOAD_IPV4_CKSUM |
+ DEV_TX_OFFLOAD_UDP_CKSUM |
+ DEV_TX_OFFLOAD_TCP_CKSUM);
+}
+
+static int dpaa_eth_link_update(struct rte_eth_dev *dev,
+ int wait_to_complete __rte_unused)
+{
+ struct dpaa_if *dpaa_intf = dev->data->dev_private;
+ struct rte_eth_link *link = &dev->data->dev_link;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (dpaa_intf->fif->mac_type == fman_mac_1g)
+ link->link_speed = 1000;
+ else if (dpaa_intf->fif->mac_type == fman_mac_10g)
+ link->link_speed = 10000;
+ else
+ DPAA_PMD_ERR("invalid link_speed: %s, %d",
+ dpaa_intf->name, dpaa_intf->fif->mac_type);
+
+ link->link_status = dpaa_intf->valid;
+ link->link_duplex = ETH_LINK_FULL_DUPLEX;
+ link->link_autoneg = ETH_LINK_AUTONEG;
+ return 0;
+}
+
+static int dpaa_eth_stats_get(struct rte_eth_dev *dev,
+ struct rte_eth_stats *stats)
+{
+ struct dpaa_if *dpaa_intf = dev->data->dev_private;
+
+ PMD_INIT_FUNC_TRACE();
+
+ fman_if_stats_get(dpaa_intf->fif, stats);
+ return 0;
+}
+
+static void dpaa_eth_stats_reset(struct rte_eth_dev *dev)
+{
+ struct dpaa_if *dpaa_intf = dev->data->dev_private;
+
+ PMD_INIT_FUNC_TRACE();
+
+ fman_if_stats_reset(dpaa_intf->fif);
+}
+
+static int
+dpaa_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
+ unsigned int n)
+{
+ struct dpaa_if *dpaa_intf = dev->data->dev_private;
+ unsigned int i = 0, num = RTE_DIM(dpaa_xstats_strings);
+ uint64_t values[sizeof(struct dpaa_if_stats) / 8];
+
+ if (xstats == NULL)
+ return 0;
+
+ if (n < num)
+ return num;
+
+ fman_if_stats_get_all(dpaa_intf->fif, values,
+ sizeof(struct dpaa_if_stats) / 8);
+
+ for (i = 0; i < num; i++) {
+ xstats[i].id = i;
+ xstats[i].value = values[dpaa_xstats_strings[i].offset / 8];
+ }
+ return i;
+}
+
+static int
+dpaa_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
+ struct rte_eth_xstat_name *xstats_names,
+ __rte_unused unsigned int limit)
+{
+ unsigned int i, stat_cnt = RTE_DIM(dpaa_xstats_strings);
+
+ if (xstats_names != NULL)
+ for (i = 0; i < stat_cnt; i++)
+ snprintf(xstats_names[i].name,
+ sizeof(xstats_names[i].name),
+ "%s",
+ dpaa_xstats_strings[i].name);
+
+ return stat_cnt;
+}
+
+static int
+dpaa_xstats_get_by_id(struct rte_eth_dev *dev, const uint64_t *ids,
+ uint64_t *values, unsigned int n)
+{
+ unsigned int i, stat_cnt = RTE_DIM(dpaa_xstats_strings);
+ uint64_t values_copy[sizeof(struct dpaa_if_stats) / 8];
+
+ if (!ids) {
+ struct dpaa_if *dpaa_intf = dev->data->dev_private;
+
+ if (n < stat_cnt)
+ return stat_cnt;
+
+ if (!values)
+ return 0;
+
+		fman_if_stats_get_all(dpaa_intf->fif, values_copy,
+				      sizeof(struct dpaa_if_stats) / 8);
+
+ for (i = 0; i < stat_cnt; i++)
+ values[i] =
+ values_copy[dpaa_xstats_strings[i].offset / 8];
+
+ return stat_cnt;
+ }
+
+ dpaa_xstats_get_by_id(dev, NULL, values_copy, stat_cnt);
+
+ for (i = 0; i < n; i++) {
+ if (ids[i] >= stat_cnt) {
+ DPAA_PMD_ERR("id value isn't valid");
+ return -1;
+ }
+ values[i] = values_copy[ids[i]];
+ }
+ return n;
+}
+
+static int
+dpaa_xstats_get_names_by_id(
+ struct rte_eth_dev *dev,
+ struct rte_eth_xstat_name *xstats_names,
+ const uint64_t *ids,
+ unsigned int limit)
+{
+ unsigned int i, stat_cnt = RTE_DIM(dpaa_xstats_strings);
+ struct rte_eth_xstat_name xstats_names_copy[stat_cnt];
+
+ if (!ids)
+ return dpaa_xstats_get_names(dev, xstats_names, limit);
+
+ dpaa_xstats_get_names(dev, xstats_names_copy, limit);
+
+ for (i = 0; i < limit; i++) {
+ if (ids[i] >= stat_cnt) {
+ DPAA_PMD_ERR("id value isn't valid");
+ return -1;
+ }
+ strcpy(xstats_names[i].name, xstats_names_copy[ids[i]].name);
+ }
+ return limit;
+}
+
+static void dpaa_eth_promiscuous_enable(struct rte_eth_dev *dev)
+{
+ struct dpaa_if *dpaa_intf = dev->data->dev_private;
+
+ PMD_INIT_FUNC_TRACE();
+
+ fman_if_promiscuous_enable(dpaa_intf->fif);
+}
+
+static void dpaa_eth_promiscuous_disable(struct rte_eth_dev *dev)
+{
+ struct dpaa_if *dpaa_intf = dev->data->dev_private;
+
+ PMD_INIT_FUNC_TRACE();
+
+ fman_if_promiscuous_disable(dpaa_intf->fif);
+}
+
+static void dpaa_eth_multicast_enable(struct rte_eth_dev *dev)
+{
+ struct dpaa_if *dpaa_intf = dev->data->dev_private;
+
+ PMD_INIT_FUNC_TRACE();
+
+ fman_if_set_mcast_filter_table(dpaa_intf->fif);
+}
+
+static void dpaa_eth_multicast_disable(struct rte_eth_dev *dev)
+{
+ struct dpaa_if *dpaa_intf = dev->data->dev_private;
+
+ PMD_INIT_FUNC_TRACE();
+
+ fman_if_reset_mcast_filter_table(dpaa_intf->fif);
+}
+
+static
+int dpaa_eth_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
+ uint16_t nb_desc __rte_unused,
+ unsigned int socket_id __rte_unused,
+ const struct rte_eth_rxconf *rx_conf __rte_unused,
+ struct rte_mempool *mp)
+{
+ struct dpaa_if *dpaa_intf = dev->data->dev_private;
+
+ PMD_INIT_FUNC_TRACE();
+
+ DPAA_PMD_INFO("Rx queue setup for queue index: %d", queue_idx);
+
+ if (!dpaa_intf->bp_info || dpaa_intf->bp_info->mp != mp) {
+ struct fman_if_ic_params icp;
+ uint32_t fd_offset;
+ uint32_t bp_size;
+
+ if (!mp->pool_data) {
+ DPAA_PMD_ERR("Not an offloaded buffer pool!");
+ return -1;
+ }
+ dpaa_intf->bp_info = DPAA_MEMPOOL_TO_POOL_INFO(mp);
+
+ memset(&icp, 0, sizeof(icp));
+		/* set the IC transfer params to their default values */
+ icp.iciof = DEFAULT_ICIOF;
+ icp.iceof = DEFAULT_RX_ICEOF;
+ icp.icsz = DEFAULT_ICSZ;
+ fman_if_set_ic_params(dpaa_intf->fif, &icp);
+
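+		/* Received frames are DMA'd at fd_offset into the buffer so
+		 * that RTE_PKTMBUF_HEADROOM (plus any HW annotation reserve)
+		 * stays intact ahead of the packet data.
+		 */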
+ fd_offset = RTE_PKTMBUF_HEADROOM + DPAA_HW_BUF_RESERVE;
+ fman_if_set_fdoff(dpaa_intf->fif, fd_offset);
+
+		/* Buffer pool size should be equal to the dataroom size */
+ bp_size = rte_pktmbuf_data_room_size(mp);
+ fman_if_set_bp(dpaa_intf->fif, mp->size,
+ dpaa_intf->bp_info->bpid, bp_size);
+ dpaa_intf->valid = 1;
+ DPAA_PMD_INFO("if =%s - fd_offset = %d offset = %d",
+ dpaa_intf->name, fd_offset,
+ fman_if_get_fdoff(dpaa_intf->fif));
+ }
+ dev->data->rx_queues[queue_idx] = &dpaa_intf->rx_queues[queue_idx];
+
+ return 0;
+}
+
+static
+void dpaa_eth_rx_queue_release(void *rxq __rte_unused)
+{
+ PMD_INIT_FUNC_TRACE();
+}
+
+static
+int dpaa_eth_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
+ uint16_t nb_desc __rte_unused,
+ unsigned int socket_id __rte_unused,
+ const struct rte_eth_txconf *tx_conf __rte_unused)
+{
+ struct dpaa_if *dpaa_intf = dev->data->dev_private;
+
+ PMD_INIT_FUNC_TRACE();
+
+ DPAA_PMD_INFO("Tx queue setup for queue index: %d", queue_idx);
+ dev->data->tx_queues[queue_idx] = &dpaa_intf->tx_queues[queue_idx];
+ return 0;
+}
+
+static void dpaa_eth_tx_queue_release(void *txq __rte_unused)
+{
+ PMD_INIT_FUNC_TRACE();
+}
+
+static int dpaa_link_down(struct rte_eth_dev *dev)
+{
+ PMD_INIT_FUNC_TRACE();
+
+ dpaa_eth_dev_stop(dev);
+ return 0;
+}
+
+static int dpaa_link_up(struct rte_eth_dev *dev)
+{
+ PMD_INIT_FUNC_TRACE();
+
+ dpaa_eth_dev_start(dev);
+ return 0;
+}
+
+static int
+dpaa_flow_ctrl_set(struct rte_eth_dev *dev,
+ struct rte_eth_fc_conf *fc_conf)
+{
+ struct dpaa_if *dpaa_intf = dev->data->dev_private;
+ struct rte_eth_fc_conf *net_fc;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (!(dpaa_intf->fc_conf)) {
+ dpaa_intf->fc_conf = rte_zmalloc(NULL,
+ sizeof(struct rte_eth_fc_conf), MAX_CACHELINE);
+ if (!dpaa_intf->fc_conf) {
+ DPAA_PMD_ERR("unable to save flow control info");
+ return -ENOMEM;
+ }
+ }
+ net_fc = dpaa_intf->fc_conf;
+
+ if (fc_conf->high_water < fc_conf->low_water) {
+ DPAA_PMD_ERR("Incorrect Flow Control Configuration");
+ return -EINVAL;
+ }
+
+ if (fc_conf->mode == RTE_FC_NONE) {
+ return 0;
+ } else if (fc_conf->mode == RTE_FC_TX_PAUSE ||
+ fc_conf->mode == RTE_FC_FULL) {
+ fman_if_set_fc_threshold(dpaa_intf->fif, fc_conf->high_water,
+ fc_conf->low_water,
+ dpaa_intf->bp_info->bpid);
+ if (fc_conf->pause_time)
+ fman_if_set_fc_quanta(dpaa_intf->fif,
+ fc_conf->pause_time);
+ }
+
+ /* Save the information in dpaa device */
+ net_fc->pause_time = fc_conf->pause_time;
+ net_fc->high_water = fc_conf->high_water;
+ net_fc->low_water = fc_conf->low_water;
+ net_fc->send_xon = fc_conf->send_xon;
+ net_fc->mac_ctrl_frame_fwd = fc_conf->mac_ctrl_frame_fwd;
+ net_fc->mode = fc_conf->mode;
+ net_fc->autoneg = fc_conf->autoneg;
+
+ return 0;
+}
+
+static int
+dpaa_flow_ctrl_get(struct rte_eth_dev *dev,
+ struct rte_eth_fc_conf *fc_conf)
+{
+ struct dpaa_if *dpaa_intf = dev->data->dev_private;
+ struct rte_eth_fc_conf *net_fc = dpaa_intf->fc_conf;
+ int ret;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (net_fc) {
+ fc_conf->pause_time = net_fc->pause_time;
+ fc_conf->high_water = net_fc->high_water;
+ fc_conf->low_water = net_fc->low_water;
+ fc_conf->send_xon = net_fc->send_xon;
+ fc_conf->mac_ctrl_frame_fwd = net_fc->mac_ctrl_frame_fwd;
+ fc_conf->mode = net_fc->mode;
+ fc_conf->autoneg = net_fc->autoneg;
+ return 0;
+ }
+ ret = fman_if_get_fc_threshold(dpaa_intf->fif);
+ if (ret) {
+ fc_conf->mode = RTE_FC_TX_PAUSE;
+ fc_conf->pause_time = fman_if_get_fc_quanta(dpaa_intf->fif);
+ } else {
+ fc_conf->mode = RTE_FC_NONE;
+ }
+
+ return 0;
+}
+
+static int
+dpaa_dev_add_mac_addr(struct rte_eth_dev *dev,
+ struct ether_addr *addr,
+ uint32_t index,
+ __rte_unused uint32_t pool)
+{
+ int ret;
+ struct dpaa_if *dpaa_intf = dev->data->dev_private;
+
+ PMD_INIT_FUNC_TRACE();
+
+ ret = fman_if_add_mac_addr(dpaa_intf->fif, addr->addr_bytes, index);
+
+ if (ret)
+ RTE_LOG(ERR, PMD, "error: Adding the MAC ADDR failed:"
+ " err = %d", ret);
+ return 0;
+}
+
+static void
+dpaa_dev_remove_mac_addr(struct rte_eth_dev *dev,
+ uint32_t index)
+{
+ struct dpaa_if *dpaa_intf = dev->data->dev_private;
+
+ PMD_INIT_FUNC_TRACE();
+
+ fman_if_clear_mac_addr(dpaa_intf->fif, index);
+}
+
+static void
+dpaa_dev_set_mac_addr(struct rte_eth_dev *dev,
+ struct ether_addr *addr)
+{
+ int ret;
+ struct dpaa_if *dpaa_intf = dev->data->dev_private;
+
+ PMD_INIT_FUNC_TRACE();
+
+ ret = fman_if_add_mac_addr(dpaa_intf->fif, addr->addr_bytes, 0);
+ if (ret)
+ RTE_LOG(ERR, PMD, "error: Setting the MAC ADDR failed %d", ret);
+}
+
+static struct eth_dev_ops dpaa_devops = {
+ .dev_configure = dpaa_eth_dev_configure,
+ .dev_start = dpaa_eth_dev_start,
+ .dev_stop = dpaa_eth_dev_stop,
+ .dev_close = dpaa_eth_dev_close,
+ .dev_infos_get = dpaa_eth_dev_info,
+ .dev_supported_ptypes_get = dpaa_supported_ptypes_get,
+
+ .rx_queue_setup = dpaa_eth_rx_queue_setup,
+ .tx_queue_setup = dpaa_eth_tx_queue_setup,
+ .rx_queue_release = dpaa_eth_rx_queue_release,
+ .tx_queue_release = dpaa_eth_tx_queue_release,
+
+ .flow_ctrl_get = dpaa_flow_ctrl_get,
+ .flow_ctrl_set = dpaa_flow_ctrl_set,
+
+ .link_update = dpaa_eth_link_update,
+ .stats_get = dpaa_eth_stats_get,
+ .xstats_get = dpaa_dev_xstats_get,
+ .xstats_get_by_id = dpaa_xstats_get_by_id,
+ .xstats_get_names_by_id = dpaa_xstats_get_names_by_id,
+ .xstats_get_names = dpaa_xstats_get_names,
+ .xstats_reset = dpaa_eth_stats_reset,
+ .stats_reset = dpaa_eth_stats_reset,
+ .promiscuous_enable = dpaa_eth_promiscuous_enable,
+ .promiscuous_disable = dpaa_eth_promiscuous_disable,
+ .allmulticast_enable = dpaa_eth_multicast_enable,
+ .allmulticast_disable = dpaa_eth_multicast_disable,
+ .mtu_set = dpaa_mtu_set,
+ .dev_set_link_down = dpaa_link_down,
+ .dev_set_link_up = dpaa_link_up,
+ .mac_addr_add = dpaa_dev_add_mac_addr,
+ .mac_addr_remove = dpaa_dev_remove_mac_addr,
+ .mac_addr_set = dpaa_dev_set_mac_addr,
+
+ .fw_version_get = dpaa_fw_version_get,
+};
+
+static int dpaa_fc_set_default(struct dpaa_if *dpaa_intf)
+{
+ struct rte_eth_fc_conf *fc_conf;
+ int ret;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (!(dpaa_intf->fc_conf)) {
+ dpaa_intf->fc_conf = rte_zmalloc(NULL,
+ sizeof(struct rte_eth_fc_conf), MAX_CACHELINE);
+ if (!dpaa_intf->fc_conf) {
+ DPAA_PMD_ERR("unable to save flow control info");
+ return -ENOMEM;
+ }
+ }
+ fc_conf = dpaa_intf->fc_conf;
+ ret = fman_if_get_fc_threshold(dpaa_intf->fif);
+ if (ret) {
+ fc_conf->mode = RTE_FC_TX_PAUSE;
+ fc_conf->pause_time = fman_if_get_fc_quanta(dpaa_intf->fif);
+ } else {
+ fc_conf->mode = RTE_FC_NONE;
+ }
+
+ return 0;
+}
+
+/* Initialise an Rx FQ */
+static int dpaa_rx_queue_init(struct qman_fq *fq,
+ uint32_t fqid)
+{
+ struct qm_mcc_initfq opts;
+ int ret;
+
+ PMD_INIT_FUNC_TRACE();
+
+ ret = qman_reserve_fqid(fqid);
+ if (ret) {
+ DPAA_PMD_ERR("reserve rx fqid %d failed with ret: %d",
+ fqid, ret);
+ return -EINVAL;
+ }
+
+ DPAA_PMD_DEBUG("creating rx fq %p, fqid %d", fq, fqid);
+ ret = qman_create_fq(fqid, QMAN_FQ_FLAG_NO_ENQUEUE, fq);
+ if (ret) {
+ DPAA_PMD_ERR("create rx fqid %d failed with ret: %d",
+ fqid, ret);
+ return ret;
+ }
+
+ opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_FQCTRL |
+ QM_INITFQ_WE_CONTEXTA;
+
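+	/* Stash the annotation, packet data and FQ context cachelines to the
+	 * dequeuing core's cache, and keep the FQD cache-resident
+	 * (PREFERINCACHE) to reduce dequeue latency.
+	 */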
+ opts.fqd.dest.wq = DPAA_IF_RX_PRIORITY;
+ opts.fqd.fq_ctrl = QM_FQCTRL_AVOIDBLOCK | QM_FQCTRL_CTXASTASHING |
+ QM_FQCTRL_PREFERINCACHE;
+ opts.fqd.context_a.stashing.exclusive = 0;
+ opts.fqd.context_a.stashing.annotation_cl = DPAA_IF_RX_ANNOTATION_STASH;
+ opts.fqd.context_a.stashing.data_cl = DPAA_IF_RX_DATA_STASH;
+ opts.fqd.context_a.stashing.context_cl = DPAA_IF_RX_CONTEXT_STASH;
+
+	/* Enable tail drop */
+ opts.we_mask = opts.we_mask | QM_INITFQ_WE_TDTHRESH;
+ opts.fqd.fq_ctrl = opts.fqd.fq_ctrl | QM_FQCTRL_TDE;
+ qm_fqd_taildrop_set(&opts.fqd.td, CONG_THRESHOLD_RX_Q, 1);
+
+ ret = qman_init_fq(fq, 0, &opts);
+ if (ret)
+ DPAA_PMD_ERR("init rx fqid %d failed with ret: %d", fqid, ret);
+ return ret;
+}
+
+/* Initialise a Tx FQ */
+static int dpaa_tx_queue_init(struct qman_fq *fq,
+ struct fman_if *fman_intf)
+{
+ struct qm_mcc_initfq opts;
+ int ret;
+
+ PMD_INIT_FUNC_TRACE();
+
+ ret = qman_create_fq(0, QMAN_FQ_FLAG_DYNAMIC_FQID |
+ QMAN_FQ_FLAG_TO_DCPORTAL, fq);
+ if (ret) {
+ DPAA_PMD_ERR("create tx fq failed with ret: %d", ret);
+ return ret;
+ }
+ opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_FQCTRL |
+ QM_INITFQ_WE_CONTEXTB | QM_INITFQ_WE_CONTEXTA;
+ opts.fqd.dest.channel = fman_intf->tx_channel_id;
+ opts.fqd.dest.wq = DPAA_IF_TX_PRIORITY;
+ opts.fqd.fq_ctrl = QM_FQCTRL_PREFERINCACHE;
+ opts.fqd.context_b = 0;
+ /* no tx-confirmation */
+ opts.fqd.context_a.hi = 0x80000000 | fman_dealloc_bufs_mask_hi;
+ opts.fqd.context_a.lo = 0 | fman_dealloc_bufs_mask_lo;
+ DPAA_PMD_DEBUG("init tx fq %p, fqid %d", fq, fq->fqid);
+ ret = qman_init_fq(fq, QMAN_INITFQ_FLAG_SCHED, &opts);
+ if (ret)
+ DPAA_PMD_ERR("init tx fqid %d failed %d", fq->fqid, ret);
+ return ret;
+}
+
+#ifdef RTE_LIBRTE_DPAA_DEBUG_DRIVER
+/* Initialise a DEBUG FQ ([rt]x_error, rx_default). */
+static int dpaa_debug_queue_init(struct qman_fq *fq, uint32_t fqid)
+{
+ struct qm_mcc_initfq opts;
+ int ret;
+
+ PMD_INIT_FUNC_TRACE();
+
+ ret = qman_reserve_fqid(fqid);
+ if (ret) {
+ DPAA_PMD_ERR("Reserve debug fqid %d failed with ret: %d",
+ fqid, ret);
+ return -EINVAL;
+ }
+ /* "map" this Rx FQ to one of the interfaces Tx FQID */
+ DPAA_PMD_DEBUG("Creating debug fq %p, fqid %d", fq, fqid);
+ ret = qman_create_fq(fqid, QMAN_FQ_FLAG_NO_ENQUEUE, fq);
+ if (ret) {
+ DPAA_PMD_ERR("create debug fqid %d failed with ret: %d",
+ fqid, ret);
+ return ret;
+ }
+ opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_FQCTRL;
+ opts.fqd.dest.wq = DPAA_IF_DEBUG_PRIORITY;
+ ret = qman_init_fq(fq, 0, &opts);
+ if (ret)
+ DPAA_PMD_ERR("init debug fqid %d failed with ret: %d",
+ fqid, ret);
+ return ret;
+}
+#endif
+
+/* Initialise a network interface */
+static int
+dpaa_dev_init(struct rte_eth_dev *eth_dev)
+{
+ int num_cores, num_rx_fqs, fqid;
+ int loop, ret = 0;
+ int dev_id;
+ struct rte_dpaa_device *dpaa_device;
+ struct dpaa_if *dpaa_intf;
+ struct fm_eth_port_cfg *cfg;
+ struct fman_if *fman_intf;
+ struct fman_if_bpool *bp, *tmp_bp;
+
+ PMD_INIT_FUNC_TRACE();
+
+ /* For secondary processes, the primary has done all the work */
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ return 0;
+
+ dpaa_device = DEV_TO_DPAA_DEVICE(eth_dev->device);
+ dev_id = dpaa_device->id.dev_id;
+ dpaa_intf = eth_dev->data->dev_private;
+ cfg = &dpaa_netcfg->port_cfg[dev_id];
+ fman_intf = cfg->fman_if;
+
+ dpaa_intf->name = dpaa_device->name;
+
+	/* save fman_if & cfg in the interface structure */
+ dpaa_intf->fif = fman_intf;
+ dpaa_intf->ifid = dev_id;
+ dpaa_intf->cfg = cfg;
+
+ /* Initialize Rx FQ's */
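+	/* The per-port count of Rx frame queues defaults to
+	 * DPAA_DEFAULT_NUM_PCD_QUEUES (1) and can be overridden at run time
+	 * through the DPAA_NUM_RX_QUEUES environment variable.
+	 */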
+ if (getenv("DPAA_NUM_RX_QUEUES"))
+ num_rx_fqs = atoi(getenv("DPAA_NUM_RX_QUEUES"));
+ else
+ num_rx_fqs = DPAA_DEFAULT_NUM_PCD_QUEUES;
+
+	/* Each device cannot have more than DPAA_PCD_FQID_MULTIPLIER Rx
+	 * queues.
+	 */
+	if (num_rx_fqs <= 0 || num_rx_fqs > DPAA_PCD_FQID_MULTIPLIER) {
+		DPAA_PMD_ERR("Invalid number of RX queues");
+		return -EINVAL;
+	}
+
+	dpaa_intf->rx_queues = rte_zmalloc(NULL,
+		sizeof(struct qman_fq) * num_rx_fqs, MAX_CACHELINE);
+	if (!dpaa_intf->rx_queues)
+		return -ENOMEM;
+ for (loop = 0; loop < num_rx_fqs; loop++) {
+ fqid = DPAA_PCD_FQID_START + dpaa_intf->ifid *
+ DPAA_PCD_FQID_MULTIPLIER + loop;
+ ret = dpaa_rx_queue_init(&dpaa_intf->rx_queues[loop], fqid);
+ if (ret)
+ return ret;
+ dpaa_intf->rx_queues[loop].dpaa_intf = dpaa_intf;
+ }
+ dpaa_intf->nb_rx_queues = num_rx_fqs;
+
+ /* Initialise Tx FQs. Have as many Tx FQ's as number of cores */
+ num_cores = rte_lcore_count();
+ dpaa_intf->tx_queues = rte_zmalloc(NULL, sizeof(struct qman_fq) *
+ num_cores, MAX_CACHELINE);
+ if (!dpaa_intf->tx_queues)
+ return -ENOMEM;
+
+ for (loop = 0; loop < num_cores; loop++) {
+ ret = dpaa_tx_queue_init(&dpaa_intf->tx_queues[loop],
+ fman_intf);
+ if (ret)
+ return ret;
+ dpaa_intf->tx_queues[loop].dpaa_intf = dpaa_intf;
+ }
+ dpaa_intf->nb_tx_queues = num_cores;
+
+#ifdef RTE_LIBRTE_DPAA_DEBUG_DRIVER
+ dpaa_debug_queue_init(&dpaa_intf->debug_queues[
+ DPAA_DEBUG_FQ_RX_ERROR], fman_intf->fqid_rx_err);
+ dpaa_intf->debug_queues[DPAA_DEBUG_FQ_RX_ERROR].dpaa_intf = dpaa_intf;
+ dpaa_debug_queue_init(&dpaa_intf->debug_queues[
+ DPAA_DEBUG_FQ_TX_ERROR], fman_intf->fqid_tx_err);
+ dpaa_intf->debug_queues[DPAA_DEBUG_FQ_TX_ERROR].dpaa_intf = dpaa_intf;
+#endif
+
+ DPAA_PMD_DEBUG("All frame queues created");
+
+ /* Get the initial configuration for flow control */
+ dpaa_fc_set_default(dpaa_intf);
+
+ /* reset bpool list, initialize bpool dynamically */
+ list_for_each_entry_safe(bp, tmp_bp, &cfg->fman_if->bpool_list, node) {
+ list_del(&bp->node);
+ free(bp);
+ }
+
+ /* Populate ethdev structure */
+ eth_dev->dev_ops = &dpaa_devops;
+ eth_dev->rx_pkt_burst = dpaa_eth_queue_rx;
+ eth_dev->tx_pkt_burst = dpaa_eth_tx_drop_all;
+
+ /* Allocate memory for storing MAC addresses */
+ eth_dev->data->mac_addrs = rte_zmalloc("mac_addr",
+ ETHER_ADDR_LEN * DPAA_MAX_MAC_FILTER, 0);
+ if (eth_dev->data->mac_addrs == NULL) {
+ DPAA_PMD_ERR("Failed to allocate %d bytes needed to "
+ "store MAC addresses",
+ ETHER_ADDR_LEN * DPAA_MAX_MAC_FILTER);
+ rte_free(dpaa_intf->rx_queues);
+ rte_free(dpaa_intf->tx_queues);
+ dpaa_intf->rx_queues = NULL;
+ dpaa_intf->tx_queues = NULL;
+ dpaa_intf->nb_rx_queues = 0;
+ dpaa_intf->nb_tx_queues = 0;
+ return -ENOMEM;
+ }
+
+ /* copy the primary mac address */
+ ether_addr_copy(&fman_intf->mac_addr, &eth_dev->data->mac_addrs[0]);
+
+ RTE_LOG(INFO, PMD, "net: dpaa: %s: %02x:%02x:%02x:%02x:%02x:%02x\n",
+ dpaa_device->name,
+ fman_intf->mac_addr.addr_bytes[0],
+ fman_intf->mac_addr.addr_bytes[1],
+ fman_intf->mac_addr.addr_bytes[2],
+ fman_intf->mac_addr.addr_bytes[3],
+ fman_intf->mac_addr.addr_bytes[4],
+ fman_intf->mac_addr.addr_bytes[5]);
+
+ /* Disable RX mode */
+ fman_if_discard_rx_errors(fman_intf);
+ fman_if_disable_rx(fman_intf);
+ /* Disable promiscuous mode */
+ fman_if_promiscuous_disable(fman_intf);
+ /* Disable multicast */
+ fman_if_reset_mcast_filter_table(fman_intf);
+ /* Reset interface statistics */
+ fman_if_stats_reset(fman_intf);
+
+ return 0;
+}
+
+static int
+dpaa_dev_uninit(struct rte_eth_dev *dev)
+{
+ struct dpaa_if *dpaa_intf = dev->data->dev_private;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ return -EPERM;
+
+ if (!dpaa_intf) {
+ DPAA_PMD_WARN("Already closed or not started");
+ return -1;
+ }
+
+ dpaa_eth_dev_close(dev);
+
+ /* release configuration memory */
+ if (dpaa_intf->fc_conf)
+ rte_free(dpaa_intf->fc_conf);
+
+ rte_free(dpaa_intf->rx_queues);
+ dpaa_intf->rx_queues = NULL;
+
+ rte_free(dpaa_intf->tx_queues);
+ dpaa_intf->tx_queues = NULL;
+
+ /* free memory for storing MAC addresses */
+ rte_free(dev->data->mac_addrs);
+ dev->data->mac_addrs = NULL;
+
+ dev->dev_ops = NULL;
+ dev->rx_pkt_burst = NULL;
+ dev->tx_pkt_burst = NULL;
+
+ return 0;
+}
+
+static int
+rte_dpaa_probe(struct rte_dpaa_driver *dpaa_drv,
+ struct rte_dpaa_device *dpaa_dev)
+{
+ int diag;
+ int ret;
+ struct rte_eth_dev *eth_dev;
+
+ PMD_INIT_FUNC_TRACE();
+
+ /* In case of secondary process, the device is already configured
+ * and no further action is required, except portal initialization
+ * and verifying secondary attachment to port name.
+ */
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
+ eth_dev = rte_eth_dev_attach_secondary(dpaa_dev->name);
+ if (!eth_dev)
+ return -ENOMEM;
+ return 0;
+ }
+
+ if (!is_global_init) {
+ /* One time load of Qman/Bman drivers */
+ ret = qman_global_init();
+ if (ret) {
+ DPAA_PMD_ERR("QMAN initialization failed: %d",
+ ret);
+ return ret;
+ }
+ ret = bman_global_init();
+ if (ret) {
+ DPAA_PMD_ERR("BMAN initialization failed: %d",
+ ret);
+ return ret;
+ }
+
+ is_global_init = 1;
+ }
+
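+	/* Affine a QMAN/BMAN software portal to the calling thread; the
+	 * Rx/Tx burst functions make the same call so that every I/O lcore
+	 * gets its own portal.
+	 */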
+ ret = rte_dpaa_portal_init((void *)1);
+ if (ret) {
+ DPAA_PMD_ERR("Unable to initialize portal");
+ return ret;
+ }
+
+ eth_dev = rte_eth_dev_allocate(dpaa_dev->name);
+ if (eth_dev == NULL)
+ return -ENOMEM;
+
+ eth_dev->data->dev_private = rte_zmalloc(
+ "ethdev private structure",
+ sizeof(struct dpaa_if),
+ RTE_CACHE_LINE_SIZE);
+ if (!eth_dev->data->dev_private) {
+ DPAA_PMD_ERR("Cannot allocate memzone for port data");
+ rte_eth_dev_release_port(eth_dev);
+ return -ENOMEM;
+ }
+
+ eth_dev->device = &dpaa_dev->device;
+ eth_dev->device->driver = &dpaa_drv->driver;
+ dpaa_dev->eth_dev = eth_dev;
+
+ /* Invoke PMD device initialization function */
+ diag = dpaa_dev_init(eth_dev);
+ if (diag == 0)
+ return 0;
+
+ if (rte_eal_process_type() == RTE_PROC_PRIMARY)
+ rte_free(eth_dev->data->dev_private);
+
+ rte_eth_dev_release_port(eth_dev);
+ return diag;
+}
+
+static int
+rte_dpaa_remove(struct rte_dpaa_device *dpaa_dev)
+{
+ struct rte_eth_dev *eth_dev;
+
+ PMD_INIT_FUNC_TRACE();
+
+ eth_dev = dpaa_dev->eth_dev;
+ dpaa_dev_uninit(eth_dev);
+
+ if (rte_eal_process_type() == RTE_PROC_PRIMARY)
+ rte_free(eth_dev->data->dev_private);
+
+ rte_eth_dev_release_port(eth_dev);
+
+ return 0;
+}
+
+static struct rte_dpaa_driver rte_dpaa_pmd = {
+ .drv_type = FSL_DPAA_ETH,
+ .probe = rte_dpaa_probe,
+ .remove = rte_dpaa_remove,
+};
+
+RTE_PMD_REGISTER_DPAA(net_dpaa, rte_dpaa_pmd);
diff --git a/drivers/net/dpaa/dpaa_ethdev.h b/drivers/net/dpaa/dpaa_ethdev.h
new file mode 100644
index 00000000..5457d61b
--- /dev/null
+++ b/drivers/net/dpaa/dpaa_ethdev.h
@@ -0,0 +1,182 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) 2014-2016 Freescale Semiconductor, Inc. All rights reserved.
+ * Copyright 2017 NXP.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Freescale Semiconductor, Inc nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef __DPAA_ETHDEV_H__
+#define __DPAA_ETHDEV_H__
+
+/* System headers */
+#include <stdbool.h>
+#include <rte_ethdev.h>
+
+#include <fsl_usd.h>
+#include <fsl_qman.h>
+#include <fsl_bman.h>
+#include <of.h>
+#include <netcfg.h>
+
+/* DPAA SoC identifier; if this file is not available, it can be concluded
+ * that the board is non-DPAA. A single slot is currently supported.
+ */
+#define DPAA_SOC_ID_FILE "/sys/devices/soc0/soc_id"
+
+#define DPAA_MBUF_HW_ANNOTATION 64
+#define DPAA_FD_PTA_SIZE 64
+
+#if (DPAA_MBUF_HW_ANNOTATION + DPAA_FD_PTA_SIZE) > RTE_PKTMBUF_HEADROOM
+#error "Annotation requirement is more than RTE_PKTMBUF_HEADROOM"
+#endif
+
+/* we will re-use the HEADROOM for annotation in RX */
+#define DPAA_HW_BUF_RESERVE 0
+#define DPAA_PACKET_LAYOUT_ALIGN 64
+
+/* Alignment to use for cpu-local structs to avoid coherency problems. */
+#define MAX_CACHELINE 64
+
+#define DPAA_MIN_RX_BUF_SIZE 512
+#define DPAA_MAX_RX_PKT_LEN 10240
+
+/* RX queue tail drop threshold
+ * currently considering 32 KB packets.
+ */
+#define CONG_THRESHOLD_RX_Q (32 * 1024)
+
+/* Max MAC filters for memac (8), including the primary MAC address */
+#define DPAA_MAX_MAC_FILTER (MEMAC_NUM_OF_PADDRS + 1)
+
+/* Maximum number of slots available in the TX ring */
+#define MAX_TX_RING_SLOTS 8
+
+/* PCD frame queues */
+#define DPAA_PCD_FQID_START 0x400
+#define DPAA_PCD_FQID_MULTIPLIER 0x100
+#define DPAA_DEFAULT_NUM_PCD_QUEUES 1
+
+#define DPAA_IF_TX_PRIORITY 3
+#define DPAA_IF_RX_PRIORITY 4
+#define DPAA_IF_DEBUG_PRIORITY 7
+
+#define DPAA_IF_RX_ANNOTATION_STASH 1
+#define DPAA_IF_RX_DATA_STASH 1
+#define DPAA_IF_RX_CONTEXT_STASH 0
+
+/* Each "debug" FQ is represented by one of these */
+#define DPAA_DEBUG_FQ_RX_ERROR 0
+#define DPAA_DEBUG_FQ_TX_ERROR 1
+
+#define DPAA_RSS_OFFLOAD_ALL ( \
+ ETH_RSS_FRAG_IPV4 | \
+ ETH_RSS_NONFRAG_IPV4_TCP | \
+ ETH_RSS_NONFRAG_IPV4_UDP | \
+ ETH_RSS_NONFRAG_IPV4_SCTP | \
+ ETH_RSS_FRAG_IPV6 | \
+ ETH_RSS_NONFRAG_IPV6_TCP | \
+ ETH_RSS_NONFRAG_IPV6_UDP | \
+ ETH_RSS_NONFRAG_IPV6_SCTP)
+
+#define DPAA_TX_CKSUM_OFFLOAD_MASK ( \
+ PKT_TX_IP_CKSUM | \
+ PKT_TX_TCP_CKSUM | \
+ PKT_TX_UDP_CKSUM)
+
+/* DPAA Frame descriptor macros */
+
+#define DPAA_FD_CMD_FCO 0x80000000
+/**< Frame queue Context Override */
+#define DPAA_FD_CMD_RPD 0x40000000
+/**< Read Prepended Data */
+#define DPAA_FD_CMD_UPD 0x20000000
+/**< Update Prepended Data */
+#define DPAA_FD_CMD_DTC 0x10000000
+/**< Do IP/TCP/UDP Checksum */
+#define DPAA_FD_CMD_DCL4C 0x10000000
+/**< Didn't calculate L4 Checksum */
+#define DPAA_FD_CMD_CFQ 0x00ffffff
+/**< Confirmation Frame Queue */
+
+/* Each network interface is represented by one of these */
+struct dpaa_if {
+ int valid;
+ char *name;
+ const struct fm_eth_port_cfg *cfg;
+ struct qman_fq *rx_queues;
+ struct qman_fq *tx_queues;
+ struct qman_fq debug_queues[2];
+ uint16_t nb_rx_queues;
+ uint16_t nb_tx_queues;
+ uint32_t ifid;
+ struct fman_if *fif;
+ struct dpaa_bp_info *bp_info;
+ struct rte_eth_fc_conf *fc_conf;
+};
+
+struct dpaa_if_stats {
+ /* Rx Statistics Counter */
+ uint64_t reoct; /**<Rx Eth Octets Counter */
+ uint64_t roct; /**<Rx Octet Counters */
+ uint64_t raln; /**<Rx Alignment Error Counter */
+ uint64_t rxpf; /**<Rx valid Pause Frame */
+ uint64_t rfrm; /**<Rx Frame counter */
+ uint64_t rfcs; /**<Rx frame check seq error */
+ uint64_t rvlan; /**<Rx Vlan Frame Counter */
+ uint64_t rerr; /**<Rx Frame error */
+ uint64_t ruca; /**<Rx Unicast */
+ uint64_t rmca; /**<Rx Multicast */
+ uint64_t rbca; /**<Rx Broadcast */
+ uint64_t rdrp; /**<Rx Dropped Packet */
+ uint64_t rpkt; /**<Rx packet */
+ uint64_t rund; /**<Rx undersized packets */
+ uint32_t res_x[14];
+ uint64_t rovr; /**<Rx oversized but good */
+ uint64_t rjbr; /**<Rx oversized with bad csum */
+ uint64_t rfrg; /**<Rx fragment Packet */
+	uint64_t rcnp; /**<Rx control packets (0x8808) */
+ uint64_t rdrntp; /**<Rx dropped due to FIFO overflow */
+ uint32_t res01d0[12];
+ /* Tx Statistics Counter */
+ uint64_t teoct; /**<Tx eth octets */
+ uint64_t toct; /**<Tx Octets */
+ uint32_t res0210[2];
+ uint64_t txpf; /**<Tx valid pause frame */
+ uint64_t tfrm; /**<Tx frame counter */
+ uint64_t tfcs; /**<Tx FCS error */
+ uint64_t tvlan; /**<Tx Vlan Frame */
+ uint64_t terr; /**<Tx frame error */
+ uint64_t tuca; /**<Tx Unicast */
+ uint64_t tmca; /**<Tx Multicast */
+ uint64_t tbca; /**<Tx Broadcast */
+ uint32_t res0258[2];
+ uint64_t tpkt; /**<Tx Packet */
+ uint64_t tund; /**<Tx Undersized */
+};
+
+#endif
diff --git a/drivers/net/dpaa/dpaa_rxtx.c b/drivers/net/dpaa/dpaa_rxtx.c
new file mode 100644
index 00000000..41e57f2e
--- /dev/null
+++ b/drivers/net/dpaa/dpaa_rxtx.c
@@ -0,0 +1,756 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright 2016 Freescale Semiconductor, Inc. All rights reserved.
+ * Copyright 2017 NXP.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Freescale Semiconductor, Inc nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/* System headers */
+#include <inttypes.h>
+#include <unistd.h>
+#include <stdio.h>
+#include <limits.h>
+#include <sched.h>
+#include <pthread.h>
+
+#include <rte_byteorder.h>
+#include <rte_common.h>
+#include <rte_interrupts.h>
+#include <rte_log.h>
+#include <rte_debug.h>
+#include <rte_pci.h>
+#include <rte_atomic.h>
+#include <rte_branch_prediction.h>
+#include <rte_memory.h>
+#include <rte_tailq.h>
+#include <rte_eal.h>
+#include <rte_alarm.h>
+#include <rte_ether.h>
+#include <rte_ethdev.h>
+#include <rte_malloc.h>
+#include <rte_ring.h>
+#include <rte_ip.h>
+#include <rte_tcp.h>
+#include <rte_udp.h>
+
+#include "dpaa_ethdev.h"
+#include "dpaa_rxtx.h"
+#include <rte_dpaa_bus.h>
+#include <dpaa_mempool.h>
+
+#include <fsl_usd.h>
+#include <fsl_qman.h>
+#include <fsl_bman.h>
+#include <of.h>
+#include <netcfg.h>
+
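+/* Pack a contiguous-frame FD: the 'opaque' word carries the frame format,
+ * buffer offset and frame length; 'addr' is the buffer IOVA and 'bpid'
+ * names the BMAN pool owning the buffer (0xff means HW must not free it).
+ */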
+#define DPAA_MBUF_TO_CONTIG_FD(_mbuf, _fd, _bpid) \
+ do { \
+ (_fd)->cmd = 0; \
+ (_fd)->opaque_addr = 0; \
+ (_fd)->opaque = QM_FD_CONTIG << DPAA_FD_FORMAT_SHIFT; \
+ (_fd)->opaque |= ((_mbuf)->data_off) << DPAA_FD_OFFSET_SHIFT; \
+ (_fd)->opaque |= (_mbuf)->pkt_len; \
+ (_fd)->addr = (_mbuf)->buf_iova; \
+ (_fd)->bpid = _bpid; \
+ } while (0)
+
+#if (defined RTE_LIBRTE_DPAA_DEBUG_DRIVER)
+void dpaa_display_frame(const struct qm_fd *fd)
+{
+ int ii;
+ char *ptr;
+
+ printf("%s::bpid %x addr %08x%08x, format %d off %d, len %d stat %x\n",
+ __func__, fd->bpid, fd->addr_hi, fd->addr_lo, fd->format,
+ fd->offset, fd->length20, fd->status);
+
+ ptr = (char *)rte_dpaa_mem_ptov(fd->addr);
+ ptr += fd->offset;
+ printf("%02x ", *ptr);
+ for (ii = 1; ii < fd->length20; ii++) {
+ printf("%02x ", *ptr);
+ if ((ii % 16) == 0)
+ printf("\n");
+ ptr++;
+ }
+ printf("\n");
+}
+#else
+#define dpaa_display_frame(a)
+#endif
+
+static inline void dpaa_slow_parsing(struct rte_mbuf *m __rte_unused,
+ uint64_t prs __rte_unused)
+{
+ DPAA_DP_LOG(DEBUG, "Slow parsing");
+ /*TBD:XXX: to be implemented*/
+}
+
+static inline void dpaa_eth_packet_info(struct rte_mbuf *m,
+ uint64_t fd_virt_addr)
+{
+ struct annotations_t *annot = GET_ANNOTATIONS(fd_virt_addr);
+ uint64_t prs = *((uint64_t *)(&annot->parse)) & DPAA_PARSE_MASK;
+
+ DPAA_DP_LOG(DEBUG, " Parsing mbuf: %p with annotations: %p", m, annot);
+
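+	/* The FMan parser writes its results into the frame annotation area;
+	 * the masked parse word is matched below against the known
+	 * L2/L3/L4 combinations to fill in the mbuf packet type.
+	 */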
+ switch (prs) {
+ case DPAA_PKT_TYPE_NONE:
+ m->packet_type = 0;
+ break;
+ case DPAA_PKT_TYPE_ETHER:
+ m->packet_type = RTE_PTYPE_L2_ETHER;
+ break;
+ case DPAA_PKT_TYPE_IPV4:
+ m->packet_type = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4;
+ break;
+ case DPAA_PKT_TYPE_IPV6:
+ m->packet_type = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV6;
+ break;
+ case DPAA_PKT_TYPE_IPV4_FRAG:
+ case DPAA_PKT_TYPE_IPV4_FRAG_UDP:
+ case DPAA_PKT_TYPE_IPV4_FRAG_TCP:
+ case DPAA_PKT_TYPE_IPV4_FRAG_SCTP:
+ m->packet_type = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_FRAG;
+ break;
+ case DPAA_PKT_TYPE_IPV6_FRAG:
+ case DPAA_PKT_TYPE_IPV6_FRAG_UDP:
+ case DPAA_PKT_TYPE_IPV6_FRAG_TCP:
+ case DPAA_PKT_TYPE_IPV6_FRAG_SCTP:
+ m->packet_type = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_FRAG;
+ break;
+ case DPAA_PKT_TYPE_IPV4_EXT:
+ m->packet_type = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4_EXT;
+ break;
+ case DPAA_PKT_TYPE_IPV6_EXT:
+ m->packet_type = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV6_EXT;
+ break;
+ case DPAA_PKT_TYPE_IPV4_TCP:
+ m->packet_type = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_TCP;
+ break;
+ case DPAA_PKT_TYPE_IPV6_TCP:
+ m->packet_type = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_TCP;
+ break;
+ case DPAA_PKT_TYPE_IPV4_UDP:
+ m->packet_type = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_UDP;
+ break;
+ case DPAA_PKT_TYPE_IPV6_UDP:
+ m->packet_type = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_UDP;
+ break;
+ case DPAA_PKT_TYPE_IPV4_EXT_UDP:
+ m->packet_type = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_L4_UDP;
+ break;
+ case DPAA_PKT_TYPE_IPV6_EXT_UDP:
+ m->packet_type = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L4_UDP;
+ break;
+ case DPAA_PKT_TYPE_IPV4_EXT_TCP:
+ m->packet_type = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_L4_TCP;
+ break;
+ case DPAA_PKT_TYPE_IPV6_EXT_TCP:
+ m->packet_type = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L4_TCP;
+ break;
+ case DPAA_PKT_TYPE_IPV4_SCTP:
+ m->packet_type = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_SCTP;
+ break;
+ case DPAA_PKT_TYPE_IPV6_SCTP:
+ m->packet_type = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_SCTP;
+ break;
+ /* More switch cases can be added */
+ default:
+ dpaa_slow_parsing(m, prs);
+ }
+
+ m->tx_offload = annot->parse.ip_off[0];
+ m->tx_offload |= (annot->parse.l4_off - annot->parse.ip_off[0])
+ << DPAA_PKT_L3_LEN_SHIFT;
+
+ /* Set the hash values */
+ m->hash.rss = (uint32_t)(rte_be_to_cpu_64(annot->hash));
+ m->ol_flags = PKT_RX_RSS_HASH;
+ /* All packets with Bad checksum are dropped by interface (and
+ * corresponding notification issued to RX error queues).
+ */
+ m->ol_flags |= PKT_RX_IP_CKSUM_GOOD;
+
+ /* Check if Vlan is present */
+ if (prs & DPAA_PARSE_VLAN_MASK)
+ m->ol_flags |= PKT_RX_VLAN;
+ /* Packet received without stripping the vlan */
+}
+
+static inline void dpaa_checksum(struct rte_mbuf *mbuf)
+{
+ struct ether_hdr *eth_hdr = rte_pktmbuf_mtod(mbuf, struct ether_hdr *);
+ char *l3_hdr = (char *)eth_hdr + mbuf->l2_len;
+ struct ipv4_hdr *ipv4_hdr = (struct ipv4_hdr *)l3_hdr;
+ struct ipv6_hdr *ipv6_hdr = (struct ipv6_hdr *)l3_hdr;
+
+ DPAA_DP_LOG(DEBUG, "Calculating checksum for mbuf: %p", mbuf);
+
+ if (((mbuf->packet_type & RTE_PTYPE_L3_MASK) == RTE_PTYPE_L3_IPV4) ||
+ ((mbuf->packet_type & RTE_PTYPE_L3_MASK) ==
+ RTE_PTYPE_L3_IPV4_EXT)) {
+ ipv4_hdr = (struct ipv4_hdr *)l3_hdr;
+ ipv4_hdr->hdr_checksum = 0;
+ ipv4_hdr->hdr_checksum = rte_ipv4_cksum(ipv4_hdr);
+ } else if (((mbuf->packet_type & RTE_PTYPE_L3_MASK) ==
+ RTE_PTYPE_L3_IPV6) ||
+ ((mbuf->packet_type & RTE_PTYPE_L3_MASK) ==
+ RTE_PTYPE_L3_IPV6_EXT))
+ ipv6_hdr = (struct ipv6_hdr *)l3_hdr;
+
+ if ((mbuf->packet_type & RTE_PTYPE_L4_MASK) == RTE_PTYPE_L4_TCP) {
+ struct tcp_hdr *tcp_hdr = (struct tcp_hdr *)(l3_hdr +
+ mbuf->l3_len);
+ tcp_hdr->cksum = 0;
+ if (eth_hdr->ether_type == htons(ETHER_TYPE_IPv4))
+ tcp_hdr->cksum = rte_ipv4_udptcp_cksum(ipv4_hdr,
+ tcp_hdr);
+ else /* assume ethertype == ETHER_TYPE_IPv6 */
+ tcp_hdr->cksum = rte_ipv6_udptcp_cksum(ipv6_hdr,
+ tcp_hdr);
+ } else if ((mbuf->packet_type & RTE_PTYPE_L4_MASK) ==
+ RTE_PTYPE_L4_UDP) {
+ struct udp_hdr *udp_hdr = (struct udp_hdr *)(l3_hdr +
+ mbuf->l3_len);
+ udp_hdr->dgram_cksum = 0;
+ if (eth_hdr->ether_type == htons(ETHER_TYPE_IPv4))
+ udp_hdr->dgram_cksum = rte_ipv4_udptcp_cksum(ipv4_hdr,
+ udp_hdr);
+ else /* assume ethertype == ETHER_TYPE_IPv6 */
+ udp_hdr->dgram_cksum = rte_ipv6_udptcp_cksum(ipv6_hdr,
+ udp_hdr);
+ }
+}
+
+static inline void dpaa_checksum_offload(struct rte_mbuf *mbuf,
+ struct qm_fd *fd, char *prs_buf)
+{
+ struct dpaa_eth_parse_results_t *prs;
+
+ DPAA_DP_LOG(DEBUG, " Offloading checksum for mbuf: %p", mbuf);
+
+ prs = GET_TX_PRS(prs_buf);
+ prs->l3r = 0;
+ prs->l4r = 0;
+ if (((mbuf->packet_type & RTE_PTYPE_L3_MASK) == RTE_PTYPE_L3_IPV4) ||
+ ((mbuf->packet_type & RTE_PTYPE_L3_MASK) ==
+ RTE_PTYPE_L3_IPV4_EXT))
+ prs->l3r = DPAA_L3_PARSE_RESULT_IPV4;
+ else if (((mbuf->packet_type & RTE_PTYPE_L3_MASK) ==
+ RTE_PTYPE_L3_IPV6) ||
+ ((mbuf->packet_type & RTE_PTYPE_L3_MASK) ==
+ RTE_PTYPE_L3_IPV6_EXT))
+ prs->l3r = DPAA_L3_PARSE_RESULT_IPV6;
+
+ if ((mbuf->packet_type & RTE_PTYPE_L4_MASK) == RTE_PTYPE_L4_TCP)
+ prs->l4r = DPAA_L4_PARSE_RESULT_TCP;
+ else if ((mbuf->packet_type & RTE_PTYPE_L4_MASK) == RTE_PTYPE_L4_UDP)
+ prs->l4r = DPAA_L4_PARSE_RESULT_UDP;
+
+ prs->ip_off[0] = mbuf->l2_len;
+ prs->l4_off = mbuf->l3_len + mbuf->l2_len;
+ /* Enable L3 (and L4, if TCP or UDP) HW checksum*/
+ fd->cmd = DPAA_FD_CMD_RPD | DPAA_FD_CMD_DTC;
+}
+
+struct rte_mbuf *
+dpaa_eth_sg_to_mbuf(struct qm_fd *fd, uint32_t ifid)
+{
+ struct dpaa_bp_info *bp_info = DPAA_BPID_TO_POOL_INFO(fd->bpid);
+ struct rte_mbuf *first_seg, *prev_seg, *cur_seg, *temp;
+ struct qm_sg_entry *sgt, *sg_temp;
+ void *vaddr, *sg_vaddr;
+ int i = 0;
+ uint8_t fd_offset = fd->offset;
+
+ DPAA_DP_LOG(DEBUG, "Received an SG frame");
+
+ vaddr = rte_dpaa_mem_ptov(qm_fd_addr(fd));
+ if (!vaddr) {
+ DPAA_PMD_ERR("unable to convert physical address");
+ return NULL;
+ }
+ sgt = vaddr + fd_offset;
+ sg_temp = &sgt[i++];
+ hw_sg_to_cpu(sg_temp);
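+	/* 'temp' is the mbuf whose buffer backs the SG table itself; it
+	 * carries no packet data and is freed once the segment chain below
+	 * has been built.
+	 */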
+ temp = (struct rte_mbuf *)((char *)vaddr - bp_info->meta_data_size);
+ sg_vaddr = rte_dpaa_mem_ptov(qm_sg_entry_get64(sg_temp));
+
+ first_seg = (struct rte_mbuf *)((char *)sg_vaddr -
+ bp_info->meta_data_size);
+ first_seg->data_off = sg_temp->offset;
+ first_seg->data_len = sg_temp->length;
+ first_seg->pkt_len = sg_temp->length;
+ rte_mbuf_refcnt_set(first_seg, 1);
+
+ first_seg->port = ifid;
+ first_seg->nb_segs = 1;
+ first_seg->ol_flags = 0;
+ prev_seg = first_seg;
+ while (i < DPAA_SGT_MAX_ENTRIES) {
+ sg_temp = &sgt[i++];
+ hw_sg_to_cpu(sg_temp);
+ sg_vaddr = rte_dpaa_mem_ptov(qm_sg_entry_get64(sg_temp));
+ cur_seg = (struct rte_mbuf *)((char *)sg_vaddr -
+ bp_info->meta_data_size);
+ cur_seg->data_off = sg_temp->offset;
+ cur_seg->data_len = sg_temp->length;
+ first_seg->pkt_len += sg_temp->length;
+ first_seg->nb_segs += 1;
+ rte_mbuf_refcnt_set(cur_seg, 1);
+ prev_seg->next = cur_seg;
+ if (sg_temp->final) {
+ cur_seg->next = NULL;
+ break;
+ }
+ prev_seg = cur_seg;
+ }
+
+ dpaa_eth_packet_info(first_seg, (uint64_t)vaddr);
+ rte_pktmbuf_free_seg(temp);
+
+ return first_seg;
+}
+
+static inline struct rte_mbuf *dpaa_eth_fd_to_mbuf(struct qm_fd *fd,
+ uint32_t ifid)
+{
+ struct dpaa_bp_info *bp_info = DPAA_BPID_TO_POOL_INFO(fd->bpid);
+ struct rte_mbuf *mbuf;
+ void *ptr;
+ uint8_t format =
+ (fd->opaque & DPAA_FD_FORMAT_MASK) >> DPAA_FD_FORMAT_SHIFT;
+ uint16_t offset =
+ (fd->opaque & DPAA_FD_OFFSET_MASK) >> DPAA_FD_OFFSET_SHIFT;
+ uint32_t length = fd->opaque & DPAA_FD_LENGTH_MASK;
+
+ DPAA_DP_LOG(DEBUG, " FD--->MBUF");
+
+ if (unlikely(format == qm_fd_sg))
+ return dpaa_eth_sg_to_mbuf(fd, ifid);
+
+ /* Ignoring case when format != qm_fd_contig */
+ dpaa_display_frame(fd);
+ ptr = rte_dpaa_mem_ptov(fd->addr);
+	/* Ignoring the case when ptr would be NULL; that is only possible
+	 * in case of a corrupted packet.
+	 */
+
+ mbuf = (struct rte_mbuf *)((char *)ptr - bp_info->meta_data_size);
+ /* Prefetch the Parse results and packet data to L1 */
+ rte_prefetch0((void *)((uint8_t *)ptr + DEFAULT_RX_ICEOF));
+ rte_prefetch0((void *)((uint8_t *)ptr + offset));
+
+ mbuf->data_off = offset;
+ mbuf->data_len = length;
+ mbuf->pkt_len = length;
+
+ mbuf->port = ifid;
+ mbuf->nb_segs = 1;
+ mbuf->ol_flags = 0;
+ mbuf->next = NULL;
+ rte_mbuf_refcnt_set(mbuf, 1);
+ dpaa_eth_packet_info(mbuf, (uint64_t)mbuf->buf_addr);
+
+ return mbuf;
+}
+
+uint16_t dpaa_eth_queue_rx(void *q,
+ struct rte_mbuf **bufs,
+ uint16_t nb_bufs)
+{
+ struct qman_fq *fq = q;
+ struct qm_dqrr_entry *dq;
+ uint32_t num_rx = 0, ifid = ((struct dpaa_if *)fq->dpaa_intf)->ifid;
+ int ret;
+
+ ret = rte_dpaa_portal_init((void *)0);
+ if (ret) {
+ DPAA_PMD_ERR("Failure in affining portal");
+ return 0;
+ }
+
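+	/* Use a volatile dequeue command (VDQCR) to pull up to nb_bufs
+	 * frames; the loop below drains the portal until the volatile
+	 * dequeue completes.
+	 */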
+ ret = qman_set_vdq(fq, (nb_bufs > DPAA_MAX_DEQUEUE_NUM_FRAMES) ?
+ DPAA_MAX_DEQUEUE_NUM_FRAMES : nb_bufs);
+ if (ret)
+ return 0;
+
+ do {
+ dq = qman_dequeue(fq);
+ if (!dq)
+ continue;
+ bufs[num_rx++] = dpaa_eth_fd_to_mbuf(&dq->fd, ifid);
+ qman_dqrr_consume(fq, dq);
+ } while (fq->flags & QMAN_FQ_STATE_VDQCR);
+
+ return num_rx;
+}
+
+static void *dpaa_get_pktbuf(struct dpaa_bp_info *bp_info)
+{
+ int ret;
+ uint64_t buf = 0;
+ struct bm_buffer bufs;
+
+ ret = bman_acquire(bp_info->bp, &bufs, 1, 0);
+ if (ret <= 0) {
+ DPAA_PMD_WARN("Failed to allocate buffers %d", ret);
+ return (void *)buf;
+ }
+
+ DPAA_DP_LOG(DEBUG, "got buffer 0x%lx from pool %d",
+ (uint64_t)bufs.addr, bufs.bpid);
+
+	buf = (uint64_t)rte_dpaa_mem_ptov(bufs.addr) - bp_info->meta_data_size;
+
+	return (void *)buf;
+}
+
+static struct rte_mbuf *dpaa_get_dmable_mbuf(struct rte_mbuf *mbuf,
+ struct dpaa_if *dpaa_intf)
+{
+ struct rte_mbuf *dpaa_mbuf;
+
+ /* allocate pktbuffer on bpid for dpaa port */
+ dpaa_mbuf = dpaa_get_pktbuf(dpaa_intf->bp_info);
+ if (!dpaa_mbuf)
+ return NULL;
+
+ memcpy((uint8_t *)(dpaa_mbuf->buf_addr) + mbuf->data_off, (void *)
+ ((uint8_t *)(mbuf->buf_addr) + mbuf->data_off), mbuf->pkt_len);
+
+ /* Copy only the required fields */
+ dpaa_mbuf->data_off = mbuf->data_off;
+ dpaa_mbuf->pkt_len = mbuf->pkt_len;
+ dpaa_mbuf->ol_flags = mbuf->ol_flags;
+ dpaa_mbuf->packet_type = mbuf->packet_type;
+ dpaa_mbuf->tx_offload = mbuf->tx_offload;
+ rte_pktmbuf_free(mbuf);
+ return dpaa_mbuf;
+}
+
+int
+dpaa_eth_mbuf_to_sg_fd(struct rte_mbuf *mbuf,
+ struct qm_fd *fd,
+ uint32_t bpid)
+{
+ struct rte_mbuf *cur_seg = mbuf, *prev_seg = NULL;
+ struct dpaa_bp_info *bp_info = DPAA_BPID_TO_POOL_INFO(bpid);
+ struct rte_mbuf *temp, *mi;
+ struct qm_sg_entry *sg_temp, *sgt;
+ int i = 0;
+
+ DPAA_DP_LOG(DEBUG, "Creating SG FD to transmit");
+
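+	/* Allocate a separate mbuf whose data area holds the hardware SG
+	 * table describing the segments of 'mbuf'.
+	 */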
+ temp = rte_pktmbuf_alloc(bp_info->mp);
+	if (!temp) {
+		DPAA_PMD_ERR("Failure in allocation of mbuf");
+		return -1;
+	}
+	if (temp->buf_len < ((mbuf->nb_segs * sizeof(struct qm_sg_entry))
+			+ temp->data_off)) {
+		DPAA_PMD_ERR("Insufficient space in mbuf for SG entries");
+		rte_pktmbuf_free(temp);
+		return -1;
+	}
+
+ fd->cmd = 0;
+ fd->opaque_addr = 0;
+
+ if (mbuf->ol_flags & DPAA_TX_CKSUM_OFFLOAD_MASK) {
+ if (temp->data_off < DEFAULT_TX_ICEOF
+ + sizeof(struct dpaa_eth_parse_results_t))
+ temp->data_off = DEFAULT_TX_ICEOF
+ + sizeof(struct dpaa_eth_parse_results_t);
+ dcbz_64(temp->buf_addr);
+ dpaa_checksum_offload(mbuf, fd, temp->buf_addr);
+ }
+
+ sgt = temp->buf_addr + temp->data_off;
+ fd->format = QM_FD_SG;
+ fd->addr = temp->buf_iova;
+ fd->offset = temp->data_off;
+ fd->bpid = bpid;
+ fd->length20 = mbuf->pkt_len;
+
+ while (i < DPAA_SGT_MAX_ENTRIES) {
+ sg_temp = &sgt[i++];
+ sg_temp->opaque = 0;
+ sg_temp->val = 0;
+ sg_temp->addr = cur_seg->buf_iova;
+ sg_temp->offset = cur_seg->data_off;
+ sg_temp->length = cur_seg->data_len;
+ if (RTE_MBUF_DIRECT(cur_seg)) {
+ if (rte_mbuf_refcnt_read(cur_seg) > 1) {
+				/* If refcnt > 1, invalid bpid is set to ensure
+ * buffer is not freed by HW.
+ */
+ sg_temp->bpid = 0xff;
+ rte_mbuf_refcnt_update(cur_seg, -1);
+ } else {
+ sg_temp->bpid =
+ DPAA_MEMPOOL_TO_BPID(cur_seg->pool);
+ }
+ cur_seg = cur_seg->next;
+ } else {
+ /* Get owner MBUF from indirect buffer */
+ mi = rte_mbuf_from_indirect(cur_seg);
+ if (rte_mbuf_refcnt_read(mi) > 1) {
+				/* If refcnt > 1, invalid bpid is set to ensure
+ * owner buffer is not freed by HW.
+ */
+ sg_temp->bpid = 0xff;
+ } else {
+ sg_temp->bpid = DPAA_MEMPOOL_TO_BPID(mi->pool);
+ rte_mbuf_refcnt_update(mi, 1);
+ }
+ prev_seg = cur_seg;
+ cur_seg = cur_seg->next;
+ prev_seg->next = NULL;
+ rte_pktmbuf_free(prev_seg);
+ }
+ if (cur_seg == NULL) {
+ sg_temp->final = 1;
+ cpu_to_hw_sg(sg_temp);
+ break;
+ }
+ cpu_to_hw_sg(sg_temp);
+ }
+ return 0;
+}
+
+/* Handle mbufs which are not segmented (non SG) */
+static inline void
+tx_on_dpaa_pool_unsegmented(struct rte_mbuf *mbuf,
+ struct dpaa_bp_info *bp_info,
+ struct qm_fd *fd_arr)
+{
+ struct rte_mbuf *mi = NULL;
+
+ if (RTE_MBUF_DIRECT(mbuf)) {
+ if (rte_mbuf_refcnt_read(mbuf) > 1) {
+ /* In case of direct mbuf and mbuf being cloned,
+ * BMAN should _not_ release buffer.
+ */
+ DPAA_MBUF_TO_CONTIG_FD(mbuf, fd_arr, 0xff);
+			/* Buffer should be released by EAL */
+ rte_mbuf_refcnt_update(mbuf, -1);
+ } else {
+ /* In case of direct mbuf and no cloning, mbuf can be
+ * released by BMAN.
+ */
+ DPAA_MBUF_TO_CONTIG_FD(mbuf, fd_arr, bp_info->bpid);
+ }
+ } else {
+ /* This is data-containing core mbuf: 'mi' */
+ mi = rte_mbuf_from_indirect(mbuf);
+ if (rte_mbuf_refcnt_read(mi) > 1) {
+ /* In case of indirect mbuf, and mbuf being cloned,
+ * BMAN should _not_ release it and let EAL release
+ * it through pktmbuf_free below.
+ */
+ DPAA_MBUF_TO_CONTIG_FD(mbuf, fd_arr, 0xff);
+ } else {
+ /* In case of indirect mbuf, and no cloning, core mbuf
+ * should be released by BMAN.
+			 * Increase refcnt of core mbuf so that when
+ * pktmbuf_free is called and mbuf is released, EAL
+ * doesn't try to release core mbuf which would have
+ * been released by BMAN.
+ */
+ rte_mbuf_refcnt_update(mi, 1);
+ DPAA_MBUF_TO_CONTIG_FD(mbuf, fd_arr, bp_info->bpid);
+ }
+ rte_pktmbuf_free(mbuf);
+ }
+
+ if (mbuf->ol_flags & DPAA_TX_CKSUM_OFFLOAD_MASK) {
+ if (mbuf->data_off < (DEFAULT_TX_ICEOF +
+ sizeof(struct dpaa_eth_parse_results_t))) {
+ DPAA_DP_LOG(DEBUG, "Checksum offload Err: "
+ "Not enough Headroom "
+ "space for correct Checksum offload."
+ "So Calculating checksum in Software.");
+ dpaa_checksum(mbuf);
+ } else {
+ dpaa_checksum_offload(mbuf, fd_arr, mbuf->buf_addr);
+ }
+ }
+}
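+
+/* Ownership sketch for the cloned-direct case above (illustrative only;
+ * 'indirect_pool' is a hypothetical mempool for indirect mbufs):
+ *
+ *	struct rte_mbuf *clone = rte_pktmbuf_clone(mbuf, indirect_pool);
+ *	...mbuf is transmitted while refcnt(mbuf) == 2...
+ *	rte_pktmbuf_free(clone);
+ *
+ * Because refcnt(mbuf) > 1 at transmit time, the FD carries bpid 0xff:
+ * BMAN leaves the buffer alone and the EAL frees it once the last
+ * reference (the clone) is released.
+ */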
+
+/* Handle all mbufs on dpaa BMAN managed pool */
+static inline uint16_t
+tx_on_dpaa_pool(struct rte_mbuf *mbuf,
+ struct dpaa_bp_info *bp_info,
+ struct qm_fd *fd_arr)
+{
+ DPAA_DP_LOG(DEBUG, "BMAN offloaded buffer, mbuf: %p", mbuf);
+
+ if (mbuf->nb_segs == 1) {
+ /* Case for non-segmented buffers */
+ tx_on_dpaa_pool_unsegmented(mbuf, bp_info, fd_arr);
+ } else if (mbuf->nb_segs > 1 &&
+ mbuf->nb_segs <= DPAA_SGT_MAX_ENTRIES) {
+ if (dpaa_eth_mbuf_to_sg_fd(mbuf, fd_arr, bp_info->bpid)) {
+ DPAA_PMD_DEBUG("Unable to create Scatter Gather FD");
+ return 1;
+ }
+ } else {
+ DPAA_PMD_DEBUG("Number of Segments not supported");
+ return 1;
+ }
+
+ return 0;
+}
+
+/* Handle all mbufs on an external pool (non-dpaa) */
+static inline uint16_t
+tx_on_external_pool(struct qman_fq *txq, struct rte_mbuf *mbuf,
+ struct qm_fd *fd_arr)
+{
+ struct dpaa_if *dpaa_intf = txq->dpaa_intf;
+ struct rte_mbuf *dmable_mbuf;
+
+ DPAA_DP_LOG(DEBUG, "Non-BMAN offloaded buffer."
+ "Allocating an offloaded buffer");
+ dmable_mbuf = dpaa_get_dmable_mbuf(mbuf, dpaa_intf);
+ if (!dmable_mbuf) {
+ DPAA_DP_LOG(DEBUG, "no dpaa buffers.");
+ return 1;
+ }
+
+	DPAA_MBUF_TO_CONTIG_FD(dmable_mbuf, fd_arr,
+			       dpaa_intf->bp_info->bpid);
+
+ return 0;
+}
+
+uint16_t
+dpaa_eth_queue_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
+{
+ struct rte_mbuf *mbuf, *mi = NULL;
+ struct rte_mempool *mp;
+ struct dpaa_bp_info *bp_info;
+ struct qm_fd fd_arr[MAX_TX_RING_SLOTS];
+ uint32_t frames_to_send, loop, i = 0;
+ uint16_t state;
+ int ret;
+
+ ret = rte_dpaa_portal_init((void *)0);
+ if (ret) {
+ DPAA_PMD_ERR("Failure in affining portal");
+ return 0;
+ }
+
+ DPAA_DP_LOG(DEBUG, "Transmitting %d buffers on queue: %p", nb_bufs, q);
+
+ while (nb_bufs) {
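+		/* If at least 8 frames remain (nb_bufs >> 3 != 0), prepare a
+		 * full ring burst of MAX_TX_RING_SLOTS frames; otherwise
+		 * flush whatever is left in one go.
+		 */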
+ frames_to_send = (nb_bufs >> 3) ? MAX_TX_RING_SLOTS : nb_bufs;
+ for (loop = 0; loop < frames_to_send; loop++, i++) {
+ mbuf = bufs[i];
+ if (RTE_MBUF_DIRECT(mbuf)) {
+ mp = mbuf->pool;
+ } else {
+ mi = rte_mbuf_from_indirect(mbuf);
+ mp = mi->pool;
+ }
+
+ bp_info = DPAA_MEMPOOL_TO_POOL_INFO(mp);
+ if (likely(mp->ops_index == bp_info->dpaa_ops_index)) {
+ state = tx_on_dpaa_pool(mbuf, bp_info,
+ &fd_arr[loop]);
+ if (unlikely(state)) {
+				/* Set frames_to_send & nb_bufs so
+				 * that packets are transmitted up to
+				 * the last successfully prepared frame.
+				 */
+ frames_to_send = loop;
+ nb_bufs = loop;
+ goto send_pkts;
+ }
+ } else {
+ state = tx_on_external_pool(q, mbuf,
+ &fd_arr[loop]);
+ if (unlikely(state)) {
+				/* Set frames_to_send & nb_bufs so
+				 * that packets are transmitted up to
+				 * the last successfully prepared frame.
+				 */
+ frames_to_send = loop;
+ nb_bufs = loop;
+ goto send_pkts;
+ }
+ }
+ }
+
+send_pkts:
+ loop = 0;
+ while (loop < frames_to_send) {
+ loop += qman_enqueue_multi(q, &fd_arr[loop],
+ frames_to_send - loop);
+ }
+ nb_bufs -= frames_to_send;
+ }
+
+ DPAA_DP_LOG(DEBUG, "Transmitted %d buffers on queue: %p", i, q);
+
+ return i;
+}
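+
+/* A minimal usage sketch: dpaa_eth_queue_tx() is installed as the PMD's
+ * tx_pkt_burst handler, so applications reach it through the generic
+ * ethdev API ('port_id' and 'queue_id' are hypothetical here):
+ *
+ *	uint16_t sent = rte_eth_tx_burst(port_id, queue_id, pkts, n_pkts);
+ *
+ * 'sent' may be less than n_pkts if an FD could not be built; the caller
+ * retries or frees the remaining mbufs.
+ */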
+
+uint16_t dpaa_eth_tx_drop_all(void *q __rte_unused,
+ struct rte_mbuf **bufs __rte_unused,
+ uint16_t nb_bufs __rte_unused)
+{
+ DPAA_DP_LOG(DEBUG, "Drop all packets");
+
+	/* Drop all incoming packets. No need to free packets here
+	 * because the rte_eth framework frees them through the tx_buffer
+	 * callback in case this function returns a count less than nb_bufs.
+	 */
+ return 0;
+}
diff --git a/drivers/net/dpaa/dpaa_rxtx.h b/drivers/net/dpaa/dpaa_rxtx.h
new file mode 100644
index 00000000..2ffc4ffe
--- /dev/null
+++ b/drivers/net/dpaa/dpaa_rxtx.h
@@ -0,0 +1,297 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright 2016 Freescale Semiconductor, Inc. All rights reserved.
+ * Copyright 2017 NXP.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Freescale Semiconductor, Inc nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __DPAA_RXTX_H__
+#define __DPAA_RXTX_H__
+
+/* Internal offset from where IC is copied to the packet buffer */
+#define DEFAULT_ICIOF 32
+/* IC transfer size */
+#define DEFAULT_ICSZ 48
+
+/* IC offsets from buffer header address */
+#define DEFAULT_RX_ICEOF 16
+#define DEFAULT_TX_ICEOF 16
+
+/*
+ * Values for the L3R field of the FM Parse Results
+ */
+/* L3 Type field: First IP Present IPv4 */
+#define DPAA_L3_PARSE_RESULT_IPV4 0x80
+/* L3 Type field: First IP Present IPv6 */
+#define DPAA_L3_PARSE_RESULT_IPV6 0x40
+/* Values for the L4R field of the FM Parse Results
+ * See §8.8.4.7.20 - L4 HXS - L4 Results from DPAA-Rev2 Reference Manual.
+ */
+/* L4 Type field: UDP */
+#define DPAA_L4_PARSE_RESULT_UDP 0x40
+/* L4 Type field: TCP */
+#define DPAA_L4_PARSE_RESULT_TCP 0x20
+
+#define DPAA_SGT_MAX_ENTRIES 16 /* maximum number of entries in SG Table */
+
+#define DPAA_MAX_DEQUEUE_NUM_FRAMES 63
+	/**< Maximum number of frames to be dequeued in a single Rx call */
+
+/* FD structure masks and offset */
+#define DPAA_FD_FORMAT_MASK 0xE0000000
+#define DPAA_FD_OFFSET_MASK 0x1FF00000
+#define DPAA_FD_LENGTH_MASK 0xFFFFF
+#define DPAA_FD_FORMAT_SHIFT 29
+#define DPAA_FD_OFFSET_SHIFT 20
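+
+/* A sketch of decoding the 32-bit FD control word with the masks above
+ * (assumes 'fd' has already been converted to CPU byte order):
+ *
+ *	uint8_t  format = (fd->opaque & DPAA_FD_FORMAT_MASK) >>
+ *				DPAA_FD_FORMAT_SHIFT;
+ *	uint16_t offset = (fd->opaque & DPAA_FD_OFFSET_MASK) >>
+ *				DPAA_FD_OFFSET_SHIFT;
+ *	uint32_t length = fd->opaque & DPAA_FD_LENGTH_MASK;
+ */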
+
+/* Parsing mask (Little Endian) - 0x00E044ED00800000
+ * Classification Plan ID 0x00
+ * L4R 0xE0 -
+ * 0x20 - TCP
+ * 0x40 - UDP
+ * 0x80 - SCTP
+ * L3R 0xEDC4 (in Big Endian) -
+ * 0x8000 - IPv4
+ * 0x4000 - IPv6
+ * 0x8140 - IPv4 Ext + Frag
+ * 0x8040 - IPv4 Frag
+ * 0x8100 - IPv4 Ext
+ * 0x4140 - IPv6 Ext + Frag
+ * 0x4040 - IPv6 Frag
+ * 0x4100 - IPv6 Ext
+ * L2R 0x8000 (in Big Endian) -
+ * 0x8000 - Ethernet type
+ * ShimR & Logical Port ID 0x0000
+ */
+#define DPAA_PARSE_MASK 0x00E044ED00800000
+#define DPAA_PARSE_VLAN_MASK 0x0000000000700000
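+
+/* A sketch of how the mask is meant to be applied ('prs' is assumed to be
+ * the 64-bit parse-result word read from the frame annotation, already in
+ * the byte order these constants expect):
+ *
+ *	switch (prs & DPAA_PARSE_MASK) {
+ *	case DPAA_PKT_TYPE_IPV4_UDP:
+ *		m->packet_type = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4 |
+ *				 RTE_PTYPE_L4_UDP;
+ *		break;
+ *	case DPAA_PKT_TYPE_IPV6_TCP:
+ *		m->packet_type = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6 |
+ *				 RTE_PTYPE_L4_TCP;
+ *		break;
+ *	}
+ */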
+
+/* Parsed values (Little Endian) */
+#define DPAA_PKT_TYPE_NONE 0x0000000000000000
+#define DPAA_PKT_TYPE_ETHER 0x0000000000800000
+#define DPAA_PKT_TYPE_IPV4 \
+ (0x0000008000000000 | DPAA_PKT_TYPE_ETHER)
+#define DPAA_PKT_TYPE_IPV6 \
+ (0x0000004000000000 | DPAA_PKT_TYPE_ETHER)
+#define DPAA_PKT_TYPE_GRE \
+ (0x0000002000000000 | DPAA_PKT_TYPE_ETHER)
+#define DPAA_PKT_TYPE_IPV4_FRAG \
+ (0x0000400000000000 | DPAA_PKT_TYPE_IPV4)
+#define DPAA_PKT_TYPE_IPV6_FRAG \
+ (0x0000400000000000 | DPAA_PKT_TYPE_IPV6)
+#define DPAA_PKT_TYPE_IPV4_EXT \
+ (0x0000000100000000 | DPAA_PKT_TYPE_IPV4)
+#define DPAA_PKT_TYPE_IPV6_EXT \
+ (0x0000000100000000 | DPAA_PKT_TYPE_IPV6)
+#define DPAA_PKT_TYPE_IPV4_TCP \
+ (0x0020000000000000 | DPAA_PKT_TYPE_IPV4)
+#define DPAA_PKT_TYPE_IPV6_TCP \
+ (0x0020000000000000 | DPAA_PKT_TYPE_IPV6)
+#define DPAA_PKT_TYPE_IPV4_UDP \
+ (0x0040000000000000 | DPAA_PKT_TYPE_IPV4)
+#define DPAA_PKT_TYPE_IPV6_UDP \
+ (0x0040000000000000 | DPAA_PKT_TYPE_IPV6)
+#define DPAA_PKT_TYPE_IPV4_SCTP \
+ (0x0080000000000000 | DPAA_PKT_TYPE_IPV4)
+#define DPAA_PKT_TYPE_IPV6_SCTP \
+ (0x0080000000000000 | DPAA_PKT_TYPE_IPV6)
+#define DPAA_PKT_TYPE_IPV4_FRAG_TCP \
+ (0x0020000000000000 | DPAA_PKT_TYPE_IPV4_FRAG)
+#define DPAA_PKT_TYPE_IPV6_FRAG_TCP \
+ (0x0020000000000000 | DPAA_PKT_TYPE_IPV6_FRAG)
+#define DPAA_PKT_TYPE_IPV4_FRAG_UDP \
+ (0x0040000000000000 | DPAA_PKT_TYPE_IPV4_FRAG)
+#define DPAA_PKT_TYPE_IPV6_FRAG_UDP \
+ (0x0040000000000000 | DPAA_PKT_TYPE_IPV6_FRAG)
+#define DPAA_PKT_TYPE_IPV4_FRAG_SCTP \
+ (0x0080000000000000 | DPAA_PKT_TYPE_IPV4_FRAG)
+#define DPAA_PKT_TYPE_IPV6_FRAG_SCTP \
+ (0x0080000000000000 | DPAA_PKT_TYPE_IPV6_FRAG)
+#define DPAA_PKT_TYPE_IPV4_EXT_UDP \
+ (0x0040000000000000 | DPAA_PKT_TYPE_IPV4_EXT)
+#define DPAA_PKT_TYPE_IPV6_EXT_UDP \
+ (0x0040000000000000 | DPAA_PKT_TYPE_IPV6_EXT)
+#define DPAA_PKT_TYPE_IPV4_EXT_TCP \
+ (0x0020000000000000 | DPAA_PKT_TYPE_IPV4_EXT)
+#define DPAA_PKT_TYPE_IPV6_EXT_TCP \
+ (0x0020000000000000 | DPAA_PKT_TYPE_IPV6_EXT)
+#define DPAA_PKT_TYPE_TUNNEL_4_4 \
+ (0x0000000800000000 | DPAA_PKT_TYPE_IPV4)
+#define DPAA_PKT_TYPE_TUNNEL_6_6 \
+ (0x0000000400000000 | DPAA_PKT_TYPE_IPV6)
+#define DPAA_PKT_TYPE_TUNNEL_4_6 \
+ (0x0000000400000000 | DPAA_PKT_TYPE_IPV4)
+#define DPAA_PKT_TYPE_TUNNEL_6_4 \
+ (0x0000000800000000 | DPAA_PKT_TYPE_IPV6)
+#define DPAA_PKT_TYPE_TUNNEL_4_4_UDP \
+ (0x0040000000000000 | DPAA_PKT_TYPE_TUNNEL_4_4)
+#define DPAA_PKT_TYPE_TUNNEL_6_6_UDP \
+ (0x0040000000000000 | DPAA_PKT_TYPE_TUNNEL_6_6)
+#define DPAA_PKT_TYPE_TUNNEL_4_6_UDP \
+ (0x0040000000000000 | DPAA_PKT_TYPE_TUNNEL_4_6)
+#define DPAA_PKT_TYPE_TUNNEL_6_4_UDP \
+ (0x0040000000000000 | DPAA_PKT_TYPE_TUNNEL_6_4)
+#define DPAA_PKT_TYPE_TUNNEL_4_4_TCP \
+ (0x0020000000000000 | DPAA_PKT_TYPE_TUNNEL_4_4)
+#define DPAA_PKT_TYPE_TUNNEL_6_6_TCP \
+ (0x0020000000000000 | DPAA_PKT_TYPE_TUNNEL_6_6)
+#define DPAA_PKT_TYPE_TUNNEL_4_6_TCP \
+ (0x0020000000000000 | DPAA_PKT_TYPE_TUNNEL_4_6)
+#define DPAA_PKT_TYPE_TUNNEL_6_4_TCP \
+ (0x0020000000000000 | DPAA_PKT_TYPE_TUNNEL_6_4)
+#define DPAA_PKT_L3_LEN_SHIFT 7
+
+/**
+ * FMan parse result array
+ */
+struct dpaa_eth_parse_results_t {
+ uint8_t lpid; /**< Logical port id */
+ uint8_t shimr; /**< Shim header result */
+ union {
+ uint16_t l2r; /**< Layer 2 result */
+ struct {
+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+ uint16_t ethernet:1;
+ uint16_t vlan:1;
+ uint16_t llc_snap:1;
+ uint16_t mpls:1;
+ uint16_t ppoe_ppp:1;
+ uint16_t unused_1:3;
+ uint16_t unknown_eth_proto:1;
+			uint16_t eth_frame_type:2;
+			/* 00-unicast, 01-multicast, 11-broadcast */
+			uint16_t l2r_err:5;
+#else
+ uint16_t l2r_err:5;
+ uint16_t eth_frame_type:2;
+ uint16_t unknown_eth_proto:1;
+ uint16_t unused_1:3;
+ uint16_t ppoe_ppp:1;
+ uint16_t mpls:1;
+ uint16_t llc_snap:1;
+ uint16_t vlan:1;
+ uint16_t ethernet:1;
+#endif
+ } __attribute__((__packed__));
+ } __attribute__((__packed__));
+ union {
+ uint16_t l3r; /**< Layer 3 result */
+ struct {
+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+ uint16_t first_ipv4:1;
+ uint16_t first_ipv6:1;
+ uint16_t gre:1;
+ uint16_t min_enc:1;
+ uint16_t last_ipv4:1;
+ uint16_t last_ipv6:1;
+			uint16_t first_info_err:1; /* 0 info, 1 error */
+			uint16_t first_ip_err_code:5;
+			uint16_t last_info_err:1;  /* 0 info, 1 error */
+			uint16_t last_ip_err_code:3;
+#else
+			uint16_t last_ip_err_code:3;
+			uint16_t last_info_err:1;  /* 0 info, 1 error */
+			uint16_t first_ip_err_code:5;
+			uint16_t first_info_err:1; /* 0 info, 1 error */
+ uint16_t last_ipv6:1;
+ uint16_t last_ipv4:1;
+ uint16_t min_enc:1;
+ uint16_t gre:1;
+ uint16_t first_ipv6:1;
+ uint16_t first_ipv4:1;
+#endif
+ } __attribute__((__packed__));
+ } __attribute__((__packed__));
+ union {
+ uint8_t l4r; /**< Layer 4 result */
+		struct {
+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+ uint8_t l4_type:3;
+ uint8_t l4_info_err:1;
+ uint8_t l4_result:4;
+ /* if type IPSec: 1 ESP, 2 AH */
+#else
+ uint8_t l4_result:4;
+ /* if type IPSec: 1 ESP, 2 AH */
+ uint8_t l4_info_err:1;
+ uint8_t l4_type:3;
+#endif
+ } __attribute__((__packed__));
+ } __attribute__((__packed__));
+ uint8_t cplan; /**< Classification plan id */
+ uint16_t nxthdr; /**< Next Header */
+ uint16_t cksum; /**< Checksum */
+ uint32_t lcv; /**< LCV */
+ uint8_t shim_off[3]; /**< Shim offset */
+ uint8_t eth_off; /**< ETH offset */
+ uint8_t llc_snap_off; /**< LLC_SNAP offset */
+ uint8_t vlan_off[2]; /**< VLAN offset */
+ uint8_t etype_off; /**< ETYPE offset */
+ uint8_t pppoe_off; /**< PPP offset */
+ uint8_t mpls_off[2]; /**< MPLS offset */
+ uint8_t ip_off[2]; /**< IP offset */
+ uint8_t gre_off; /**< GRE offset */
+ uint8_t l4_off; /**< Layer 4 offset */
+ uint8_t nxthdr_off; /**< Parser end point */
+} __attribute__ ((__packed__));
+
+/* This structure describes the data prepended to the frame, used by FMAN */
+struct annotations_t {
+ uint8_t reserved[DEFAULT_RX_ICEOF];
+	struct dpaa_eth_parse_results_t parse; /**< Parse results */
+ uint64_t reserved1;
+ uint64_t hash; /**< Hash Result */
+};
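+
+/* A sketch of consuming the annotation area on Rx ('vaddr' is assumed to
+ * point to the start of the received buffer; any byte-order conversion the
+ * hardware requires is omitted for brevity):
+ *
+ *	const struct annotations_t *annot =
+ *		(const struct annotations_t *)vaddr;
+ *
+ *	m->hash.rss = (uint32_t)annot->hash;
+ *	m->ol_flags |= PKT_RX_RSS_HASH;
+ */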
+
+#define GET_ANNOTATIONS(_buf) \
+ (struct annotations_t *)(_buf)
+
+#define GET_RX_PRS(_buf) \
+ (struct dpaa_eth_parse_results_t *)((uint8_t *)(_buf) + \
+ DEFAULT_RX_ICEOF)
+
+#define GET_TX_PRS(_buf) \
+ (struct dpaa_eth_parse_results_t *)((uint8_t *)(_buf) + \
+ DEFAULT_TX_ICEOF)
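+
+/* Usage sketch for the accessors above: on Tx, a simplified version of
+ * what dpaa_checksum_offload() does is to write the expected parse results
+ * into the buffer headroom before the FD is enqueued:
+ *
+ *	struct dpaa_eth_parse_results_t *prs = GET_TX_PRS(mbuf->buf_addr);
+ *
+ *	prs->l3r = DPAA_L3_PARSE_RESULT_IPV4;
+ *	prs->l4r = DPAA_L4_PARSE_RESULT_UDP;
+ */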
+
+uint16_t dpaa_eth_queue_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs);
+
+uint16_t dpaa_eth_queue_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs);
+
+uint16_t dpaa_eth_tx_drop_all(void *q __rte_unused,
+ struct rte_mbuf **bufs __rte_unused,
+ uint16_t nb_bufs __rte_unused);
+
+struct rte_mbuf *dpaa_eth_sg_to_mbuf(struct qm_fd *fd, uint32_t ifid);
+
+int dpaa_eth_mbuf_to_sg_fd(struct rte_mbuf *mbuf,
+ struct qm_fd *fd,
+ uint32_t bpid);
+
+#endif /* __DPAA_RXTX_H__ */
diff --git a/drivers/net/dpaa/rte_pmd_dpaa_version.map b/drivers/net/dpaa/rte_pmd_dpaa_version.map
new file mode 100644
index 00000000..a70bd197
--- /dev/null
+++ b/drivers/net/dpaa/rte_pmd_dpaa_version.map
@@ -0,0 +1,4 @@
+DPDK_17.11 {
+
+ local: *;
+};