author     Sachin <sachin.saxena@nxp.com>          2016-06-17 14:18:19 +0530
committer  Damjan Marion <damarion@cisco.com>      2016-06-17 12:33:43 +0000
commit     bd05e17d79156fb72cb12a0d4855bfeaf44bfa32 (patch)
tree       b2cc52c239c5940552de624a7de11a5df1cd4fd9
parent     85b528e093b93e939a63cd76feef4cfa140aac6c (diff)
NXP DPAA2 Poll Mode Driver Support in DPDK
Upstreaming of DPAA2 driver changes is in progress. This patch temporarily adds the support to the DPDK built into VPP.

Two types of changes:
1. Driver-specific independent files. No impact on any other functionality.
2. Changes in the common EAL framework. These changes are guarded by a compile-time DPAA2-specific flag, so no impact is expected on other existing features when not compiling for DPAA2.

Change-Id: I02abe7189313835b51ff654b4d7e566bc0fb8327
Signed-off-by: Sachin <sachin.saxena@nxp.com>
-rw-r--r--  build-data/platforms/dpaa2.mk                                             |    10
-rw-r--r--  dpdk/dpdk-16.04_patches/0017-NXP-DPAA2-Poll-Mode-Driver-Support.patch     | 40404
2 files changed, 40410 insertions, 4 deletions
diff --git a/build-data/platforms/dpaa2.mk b/build-data/platforms/dpaa2.mk
index feafd728397..7833b876475 100644
--- a/build-data/platforms/dpaa2.mk
+++ b/build-data/platforms/dpaa2.mk
@@ -24,9 +24,8 @@ dpaa2_native_tools = vppapigen
dpaa2_root_packages = vpp vlib vlib-api vnet svm vpp-api-test
# DPDK configuration parameters
-#
-# We are using external DPDK module with NXP-DPAA2 platform support.
-# Compile DPDK only if "DPDK_PATH" variable is defined where we have
+dpaa2_uses_dpdk = yes
+# Compile with external DPDK only if "DPDK_PATH" variable is defined where we have
# installed DPDK libraries and headers.
ifeq ($(PLATFORM),dpaa2)
ifneq ($(DPDK_PATH),)
@@ -35,7 +34,10 @@ dpaa2_uses_external_dpdk = yes
dpaa2_dpdk_inc_dir = $(DPDK_PATH)/include/dpdk
dpaa2_dpdk_lib_dir = $(DPDK_PATH)/lib
else
-$(error Please define path <DPDK_PATH> for installed DPDK headers and libs)
+# compile using internal DPDK + NXP DPAA2 Driver patch
+dpaa2_dpdk_arch = "armv8a"
+dpaa2_dpdk_target = "arm64-dpaa2-linuxapp-gcc"
+dpaa2_dpdk_make_extra_args = "CROSS=$(dpaa2_target)-"
endif
endif
diff --git a/dpdk/dpdk-16.04_patches/0017-NXP-DPAA2-Poll-Mode-Driver-Support.patch b/dpdk/dpdk-16.04_patches/0017-NXP-DPAA2-Poll-Mode-Driver-Support.patch
new file mode 100644
index 00000000000..2553997c7b8
--- /dev/null
+++ b/dpdk/dpdk-16.04_patches/0017-NXP-DPAA2-Poll-Mode-Driver-Support.patch
@@ -0,0 +1,40404 @@
+From b8d83a0825f2d7d0d626c00f79de7b415f8dc344 Mon Sep 17 00:00:00 2001
+From: Sachin Saxena <sachin.saxena@nxp.com>
+Date: Fri, 17 Jun 2016 12:32:28 +0530
+Subject: [PATCH 17/17] NXP DPAA2 Poll Mode Driver Support
+
+ Upstreaming of DPAA2 driver changes is in progress. This patch will
+ temporarily add the support to the DPDK built into VPP.
+
+ Two types of changes:
+ 1. Driver-specific independent files. No impact on any other functionality.
+ 2. Changes in the common EAL framework. These changes are guarded by a
+    compile-time DPAA2-specific flag, so no impact is expected on other
+    existing features when not compiling for DPAA2.
+
+Signed-off-by: Sachin Saxena <sachin.saxena@nxp.com>
+---
+ config/defconfig_arm64-dpaa2-linuxapp-gcc | 61 +
+ drivers/net/Makefile | 1 +
+ drivers/net/dpaa2/Makefile | 102 +
+ drivers/net/dpaa2/dpaa2_logs.h | 77 +
+ drivers/net/dpaa2/mc/dpaiop.c | 457 ++++
+ drivers/net/dpaa2/mc/dpbp.c | 432 ++++
+ drivers/net/dpaa2/mc/dpci.c | 501 ++++
+ drivers/net/dpaa2/mc/dpcon.c | 401 +++
+ drivers/net/dpaa2/mc/dpdbg.c | 547 +++++
+ drivers/net/dpaa2/mc/dpdcei.c | 449 ++++
+ drivers/net/dpaa2/mc/dpdmai.c | 452 ++++
+ drivers/net/dpaa2/mc/dpdmux.c | 567 +++++
+ drivers/net/dpaa2/mc/dpio.c | 468 ++++
+ drivers/net/dpaa2/mc/dpmac.c | 422 ++++
+ drivers/net/dpaa2/mc/dpmcp.c | 312 +++
+ drivers/net/dpaa2/mc/dpmng.c | 58 +
+ drivers/net/dpaa2/mc/dpni.c | 1907 +++++++++++++++
+ drivers/net/dpaa2/mc/dprc.c | 786 ++++++
+ drivers/net/dpaa2/mc/dprtc.c | 509 ++++
+ drivers/net/dpaa2/mc/dpseci.c | 502 ++++
+ drivers/net/dpaa2/mc/dpsw.c | 1639 +++++++++++++
+ drivers/net/dpaa2/mc/fsl_dpaiop.h | 494 ++++
+ drivers/net/dpaa2/mc/fsl_dpaiop_cmd.h | 190 ++
+ drivers/net/dpaa2/mc/fsl_dpbp.h | 438 ++++
+ drivers/net/dpaa2/mc/fsl_dpbp_cmd.h | 172 ++
+ drivers/net/dpaa2/mc/fsl_dpci.h | 594 +++++
+ drivers/net/dpaa2/mc/fsl_dpci_cmd.h | 200 ++
+ drivers/net/dpaa2/mc/fsl_dpcon.h | 407 +++
+ drivers/net/dpaa2/mc/fsl_dpcon_cmd.h | 162 ++
+ drivers/net/dpaa2/mc/fsl_dpdbg.h | 635 +++++
+ drivers/net/dpaa2/mc/fsl_dpdbg_cmd.h | 249 ++
+ drivers/net/dpaa2/mc/fsl_dpdcei.h | 515 ++++
+ drivers/net/dpaa2/mc/fsl_dpdcei_cmd.h | 182 ++
+ drivers/net/dpaa2/mc/fsl_dpdmai.h | 521 ++++
+ drivers/net/dpaa2/mc/fsl_dpdmai_cmd.h | 191 ++
+ drivers/net/dpaa2/mc/fsl_dpdmux.h | 724 ++++++
+ drivers/net/dpaa2/mc/fsl_dpdmux_cmd.h | 256 ++
+ drivers/net/dpaa2/mc/fsl_dpio.h | 460 ++++
+ drivers/net/dpaa2/mc/fsl_dpio_cmd.h | 184 ++
+ drivers/net/dpaa2/mc/fsl_dpkg.h | 174 ++
+ drivers/net/dpaa2/mc/fsl_dpmac.h | 593 +++++
+ drivers/net/dpaa2/mc/fsl_dpmac_cmd.h | 195 ++
+ drivers/net/dpaa2/mc/fsl_dpmcp.h | 332 +++
+ drivers/net/dpaa2/mc/fsl_dpmcp_cmd.h | 135 +
+ drivers/net/dpaa2/mc/fsl_dpmng.h | 74 +
+ drivers/net/dpaa2/mc/fsl_dpmng_cmd.h | 46 +
+ drivers/net/dpaa2/mc/fsl_dpni.h | 2581 ++++++++++++++++++++
+ drivers/net/dpaa2/mc/fsl_dpni_cmd.h | 1058 ++++++++
+ drivers/net/dpaa2/mc/fsl_dprc.h | 1032 ++++++++
+ drivers/net/dpaa2/mc/fsl_dprc_cmd.h | 755 ++++++
+ drivers/net/dpaa2/mc/fsl_dprtc.h | 434 ++++
+ drivers/net/dpaa2/mc/fsl_dprtc_cmd.h | 181 ++
+ drivers/net/dpaa2/mc/fsl_dpseci.h | 647 +++++
+ drivers/net/dpaa2/mc/fsl_dpseci_cmd.h | 241 ++
+ drivers/net/dpaa2/mc/fsl_dpsw.h | 2164 ++++++++++++++++
+ drivers/net/dpaa2/mc/fsl_dpsw_cmd.h | 916 +++++++
+ drivers/net/dpaa2/mc/fsl_mc_cmd.h | 221 ++
+ drivers/net/dpaa2/mc/fsl_mc_sys.h | 95 +
+ drivers/net/dpaa2/mc/fsl_net.h | 480 ++++
+ drivers/net/dpaa2/mc/mc_sys.c | 129 +
+ drivers/net/dpaa2/qbman/driver/qbman_debug.c | 926 +++++++
+ drivers/net/dpaa2/qbman/driver/qbman_debug.h | 140 ++
+ drivers/net/dpaa2/qbman/driver/qbman_portal.c | 1407 +++++++++++
+ drivers/net/dpaa2/qbman/driver/qbman_portal.h | 266 ++
+ drivers/net/dpaa2/qbman/driver/qbman_private.h | 165 ++
+ drivers/net/dpaa2/qbman/driver/qbman_sys.h | 367 +++
+ drivers/net/dpaa2/qbman/driver/qbman_sys_decl.h | 68 +
+ drivers/net/dpaa2/qbman/include/compat.h | 597 +++++
+ .../dpaa2/qbman/include/drivers/fsl_qbman_base.h | 151 ++
+ .../dpaa2/qbman/include/drivers/fsl_qbman_portal.h | 1089 +++++++++
+ drivers/net/dpaa2/rte_eth_dpaa2_pvt.h | 313 +++
+ drivers/net/dpaa2/rte_eth_dpbp.c | 430 ++++
+ drivers/net/dpaa2/rte_eth_dpio.c | 339 +++
+ drivers/net/dpaa2/rte_eth_dpni.c | 2230 +++++++++++++++++
+ drivers/net/dpaa2/rte_eth_dpni_annot.h | 311 +++
+ drivers/net/dpaa2/rte_pmd_dpaa2_version.map | 4 +
+ lib/librte_eal/common/eal_private.h | 12 +
+ lib/librte_eal/linuxapp/eal/Makefile | 11 +
+ lib/librte_eal/linuxapp/eal/eal.c | 10 +
+ lib/librte_eal/linuxapp/eal/eal_soc.c | 84 +
+ lib/librte_eal/linuxapp/eal/eal_vfio_fsl_mc.c | 653 +++++
+ lib/librte_eal/linuxapp/eal/eal_vfio_fsl_mc.h | 102 +
+ lib/librte_mbuf/Makefile | 4 +
+ lib/librte_mbuf/rte_mbuf.c | 67 +
+ lib/librte_mempool/Makefile | 4 +
+ lib/librte_mempool/rte_mempool.c | 13 +
+ lib/librte_mempool/rte_mempool.h | 30 +-
+ mk/machine/dpaa2/rte.vars.mk | 60 +
+ mk/rte.app.mk | 1 +
+ 89 files changed, 39560 insertions(+), 1 deletion(-)
+ create mode 100644 config/defconfig_arm64-dpaa2-linuxapp-gcc
+ create mode 100644 drivers/net/dpaa2/Makefile
+ create mode 100644 drivers/net/dpaa2/dpaa2_logs.h
+ create mode 100644 drivers/net/dpaa2/mc/dpaiop.c
+ create mode 100644 drivers/net/dpaa2/mc/dpbp.c
+ create mode 100644 drivers/net/dpaa2/mc/dpci.c
+ create mode 100644 drivers/net/dpaa2/mc/dpcon.c
+ create mode 100644 drivers/net/dpaa2/mc/dpdbg.c
+ create mode 100644 drivers/net/dpaa2/mc/dpdcei.c
+ create mode 100644 drivers/net/dpaa2/mc/dpdmai.c
+ create mode 100644 drivers/net/dpaa2/mc/dpdmux.c
+ create mode 100644 drivers/net/dpaa2/mc/dpio.c
+ create mode 100644 drivers/net/dpaa2/mc/dpmac.c
+ create mode 100644 drivers/net/dpaa2/mc/dpmcp.c
+ create mode 100644 drivers/net/dpaa2/mc/dpmng.c
+ create mode 100644 drivers/net/dpaa2/mc/dpni.c
+ create mode 100644 drivers/net/dpaa2/mc/dprc.c
+ create mode 100644 drivers/net/dpaa2/mc/dprtc.c
+ create mode 100644 drivers/net/dpaa2/mc/dpseci.c
+ create mode 100644 drivers/net/dpaa2/mc/dpsw.c
+ create mode 100644 drivers/net/dpaa2/mc/fsl_dpaiop.h
+ create mode 100644 drivers/net/dpaa2/mc/fsl_dpaiop_cmd.h
+ create mode 100644 drivers/net/dpaa2/mc/fsl_dpbp.h
+ create mode 100644 drivers/net/dpaa2/mc/fsl_dpbp_cmd.h
+ create mode 100644 drivers/net/dpaa2/mc/fsl_dpci.h
+ create mode 100644 drivers/net/dpaa2/mc/fsl_dpci_cmd.h
+ create mode 100644 drivers/net/dpaa2/mc/fsl_dpcon.h
+ create mode 100644 drivers/net/dpaa2/mc/fsl_dpcon_cmd.h
+ create mode 100644 drivers/net/dpaa2/mc/fsl_dpdbg.h
+ create mode 100644 drivers/net/dpaa2/mc/fsl_dpdbg_cmd.h
+ create mode 100644 drivers/net/dpaa2/mc/fsl_dpdcei.h
+ create mode 100644 drivers/net/dpaa2/mc/fsl_dpdcei_cmd.h
+ create mode 100644 drivers/net/dpaa2/mc/fsl_dpdmai.h
+ create mode 100644 drivers/net/dpaa2/mc/fsl_dpdmai_cmd.h
+ create mode 100644 drivers/net/dpaa2/mc/fsl_dpdmux.h
+ create mode 100644 drivers/net/dpaa2/mc/fsl_dpdmux_cmd.h
+ create mode 100644 drivers/net/dpaa2/mc/fsl_dpio.h
+ create mode 100644 drivers/net/dpaa2/mc/fsl_dpio_cmd.h
+ create mode 100644 drivers/net/dpaa2/mc/fsl_dpkg.h
+ create mode 100644 drivers/net/dpaa2/mc/fsl_dpmac.h
+ create mode 100644 drivers/net/dpaa2/mc/fsl_dpmac_cmd.h
+ create mode 100644 drivers/net/dpaa2/mc/fsl_dpmcp.h
+ create mode 100644 drivers/net/dpaa2/mc/fsl_dpmcp_cmd.h
+ create mode 100644 drivers/net/dpaa2/mc/fsl_dpmng.h
+ create mode 100644 drivers/net/dpaa2/mc/fsl_dpmng_cmd.h
+ create mode 100644 drivers/net/dpaa2/mc/fsl_dpni.h
+ create mode 100644 drivers/net/dpaa2/mc/fsl_dpni_cmd.h
+ create mode 100644 drivers/net/dpaa2/mc/fsl_dprc.h
+ create mode 100644 drivers/net/dpaa2/mc/fsl_dprc_cmd.h
+ create mode 100644 drivers/net/dpaa2/mc/fsl_dprtc.h
+ create mode 100644 drivers/net/dpaa2/mc/fsl_dprtc_cmd.h
+ create mode 100644 drivers/net/dpaa2/mc/fsl_dpseci.h
+ create mode 100644 drivers/net/dpaa2/mc/fsl_dpseci_cmd.h
+ create mode 100644 drivers/net/dpaa2/mc/fsl_dpsw.h
+ create mode 100644 drivers/net/dpaa2/mc/fsl_dpsw_cmd.h
+ create mode 100644 drivers/net/dpaa2/mc/fsl_mc_cmd.h
+ create mode 100644 drivers/net/dpaa2/mc/fsl_mc_sys.h
+ create mode 100644 drivers/net/dpaa2/mc/fsl_net.h
+ create mode 100644 drivers/net/dpaa2/mc/mc_sys.c
+ create mode 100644 drivers/net/dpaa2/qbman/driver/qbman_debug.c
+ create mode 100644 drivers/net/dpaa2/qbman/driver/qbman_debug.h
+ create mode 100644 drivers/net/dpaa2/qbman/driver/qbman_portal.c
+ create mode 100644 drivers/net/dpaa2/qbman/driver/qbman_portal.h
+ create mode 100644 drivers/net/dpaa2/qbman/driver/qbman_private.h
+ create mode 100644 drivers/net/dpaa2/qbman/driver/qbman_sys.h
+ create mode 100644 drivers/net/dpaa2/qbman/driver/qbman_sys_decl.h
+ create mode 100644 drivers/net/dpaa2/qbman/include/compat.h
+ create mode 100644 drivers/net/dpaa2/qbman/include/drivers/fsl_qbman_base.h
+ create mode 100644 drivers/net/dpaa2/qbman/include/drivers/fsl_qbman_portal.h
+ create mode 100644 drivers/net/dpaa2/rte_eth_dpaa2_pvt.h
+ create mode 100644 drivers/net/dpaa2/rte_eth_dpbp.c
+ create mode 100644 drivers/net/dpaa2/rte_eth_dpio.c
+ create mode 100644 drivers/net/dpaa2/rte_eth_dpni.c
+ create mode 100644 drivers/net/dpaa2/rte_eth_dpni_annot.h
+ create mode 100644 drivers/net/dpaa2/rte_pmd_dpaa2_version.map
+ create mode 100644 lib/librte_eal/linuxapp/eal/eal_soc.c
+ create mode 100644 lib/librte_eal/linuxapp/eal/eal_vfio_fsl_mc.c
+ create mode 100644 lib/librte_eal/linuxapp/eal/eal_vfio_fsl_mc.h
+ create mode 100644 mk/machine/dpaa2/rte.vars.mk
+
+diff --git a/config/defconfig_arm64-dpaa2-linuxapp-gcc b/config/defconfig_arm64-dpaa2-linuxapp-gcc
+new file mode 100644
+index 0000000..fafbef4
+--- /dev/null
++++ b/config/defconfig_arm64-dpaa2-linuxapp-gcc
+@@ -0,0 +1,61 @@
++# BSD LICENSE
++#
++# Copyright(c) 2016 Freescale Semiconductor, Inc. All rights reserved.
++#
++# Redistribution and use in source and binary forms, with or without
++# modification, are permitted provided that the following conditions
++# are met:
++#
++# * Redistributions of source code must retain the above copyright
++# notice, this list of conditions and the following disclaimer.
++# * Redistributions in binary form must reproduce the above copyright
++# notice, this list of conditions and the following disclaimer in
++# the documentation and/or other materials provided with the
++# distribution.
++# * Neither the name of Freescale Semiconductor nor the names of its
++# contributors may be used to endorse or promote products derived
++# from this software without specific prior written permission.
++#
++# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
++# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
++# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
++# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
++# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
++# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
++# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
++# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
++# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
++# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++#
++
++#include "defconfig_arm64-armv8a-linuxapp-gcc"
++
++# NXP (Freescale) - SoC architecture with WRIOP and QBMAN support
++CONFIG_RTE_MACHINE="dpaa2"
++CONFIG_RTE_ARCH_ARM_TUNE="cortex-a57+fp+simd"
++
++#
++# Compile Environment Abstraction Layer
++#
++CONFIG_RTE_MAX_LCORE=8
++CONFIG_RTE_MAX_NUMA_NODES=1
++
++# Compile software PMD backed by FSL DPAA2 files
++#
++CONFIG_RTE_LIBRTE_DPAA2_PMD=y
++CONFIG_RTE_LIBRTE_DPAA2_USE_PHYS_IOVA=n
++CONFIG_RTE_LIBRTE_DPAA2_DEBUG_INIT=n
++CONFIG_RTE_LIBRTE_DPAA2_DEBUG_DRIVER=n
++CONFIG_RTE_LIBRTE_ETHDEV_DEBUG=n
++
++CONFIG_RTE_LIBRTE_PMD_BOND=y
++CONFIG_RTE_CACHE_LINE_SIZE=128
++CONFIG_RTE_EAL_IGB_UIO=n
++CONFIG_RTE_LIBRTE_KNI=n
++
++#FSL DPAA2 caam driver
++CONFIG_RTE_LIBRTE_PMD_DPAA2_CAAM=n
++CONFIG_RTE_LIBRTE_DPAA2_CAAM_DEBUG_INIT=n
++CONFIG_RTE_LIBRTE_DPAA2_CAAM_DEBUG_DRIVER=n
++CONFIG_RTE_LIBRTE_DPAA2_CAAM_DEBUG_RX=n
+diff --git a/drivers/net/Makefile b/drivers/net/Makefile
+index 3386a67..ed10351 100644
+--- a/drivers/net/Makefile
++++ b/drivers/net/Makefile
+@@ -52,6 +52,7 @@ DIRS-$(CONFIG_RTE_LIBRTE_PMD_SZEDATA2) += szedata2
+ DIRS-$(CONFIG_RTE_LIBRTE_VIRTIO_PMD) += virtio
+ DIRS-$(CONFIG_RTE_LIBRTE_VMXNET3_PMD) += vmxnet3
+ DIRS-$(CONFIG_RTE_LIBRTE_PMD_XENVIRT) += xenvirt
++DIRS-$(CONFIG_RTE_LIBRTE_DPAA2_PMD) += dpaa2
+
+ ifeq ($(CONFIG_RTE_LIBRTE_VHOST),y)
+ DIRS-$(CONFIG_RTE_LIBRTE_PMD_VHOST) += vhost
+diff --git a/drivers/net/dpaa2/Makefile b/drivers/net/dpaa2/Makefile
+new file mode 100644
+index 0000000..3cf1782
+--- /dev/null
++++ b/drivers/net/dpaa2/Makefile
+@@ -0,0 +1,102 @@
++# BSD LICENSE
++#
++# Copyright (c) 2014 Freescale Semiconductor, Inc. All rights reserved.
++#
++# Redistribution and use in source and binary forms, with or without
++# modification, are permitted provided that the following conditions
++# are met:
++#
++# * Redistributions of source code must retain the above copyright
++# notice, this list of conditions and the following disclaimer.
++# * Redistributions in binary form must reproduce the above copyright
++# notice, this list of conditions and the following disclaimer in
++# the documentation and/or other materials provided with the
++# distribution.
++# * Neither the name of Freescale Semiconductor nor the names of its
++# contributors may be used to endorse or promote products derived
++# from this software without specific prior written permission.
++#
++# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
++# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
++# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
++# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
++# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
++# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
++# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
++# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
++# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
++# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++
++include $(RTE_SDK)/mk/rte.vars.mk
++
++#
++# library name
++#
++LIB = librte_pmd_dpaa2.a
++
++ifeq ($(CONFIG_RTE_LIBRTE_DPAA2_DEBUG_INIT),y)
++CFLAGS += -O0 -g
++CFLAGS += "-Wno-error"
++else
++CFLAGS += -O3 -g
++CFLAGS += $(WERROR_FLAGS)
++endif
++CFLAGS +=-Wno-strict-aliasing
++CFLAGS +=-Wno-missing-prototypes
++CFLAGS +=-Wno-missing-declarations
++CFLAGS +=-Wno-unused-function
++
++CFLAGS += -I$(RTE_SDK)/drivers/net/dpaa2/mc
++CFLAGS += -I$(RTE_SDK)/drivers/net/dpaa2/qbman/include
++CFLAGS += -I$(RTE_SDK)/drivers/net/dpaa2/qbman/include/drivers
++CFLAGS += -I$(RTE_SDK)/drivers/net/dpaa2/driver/
++CFLAGS += -I$(RTE_SDK)/lib/librte_eal/common/include
++CFLAGS += -I$(RTE_SDK)/lib/librte_ether
++CFLAGS += -I$(RTE_SDK)/lib/librte_eal/linuxapp/eal
++
++EXPORT_MAP := rte_pmd_dpaa2_version.map
++
++LIBABIVER := 1
++#
++# all source are stored in SRCS-y
++#
++SRCS-$(CONFIG_RTE_LIBRTE_DPAA2_PMD) += \
++ mc/dprc.c \
++ mc/dprtc.c \
++ mc/dpaiop.c \
++ mc/dpdbg.c \
++ mc/dpdcei.c \
++ mc/dpdmai.c \
++ mc/dpmac.c \
++ mc/dpmcp.c \
++ mc/dpbp.c \
++ mc/dpio.c \
++ mc/dpni.c \
++ mc/dpsw.c \
++ mc/dpci.c \
++ mc/dpcon.c \
++ mc/dpseci.c \
++ mc/dpmng.c \
++ mc/dpdmux.c \
++ mc/mc_sys.c
++
++#
++# all source are stored in SRCS-y
++#
++SRCS-$(CONFIG_RTE_LIBRTE_DPAA2_PMD) += \
++ qbman/driver/qbman_portal.c \
++ qbman/driver/qbman_debug.c
++
++SRCS-$(CONFIG_RTE_LIBRTE_DPAA2_PMD) += rte_eth_dpni.c
++SRCS-$(CONFIG_RTE_LIBRTE_DPAA2_PMD) += rte_eth_dpio.c
++SRCS-$(CONFIG_RTE_LIBRTE_DPAA2_PMD) += rte_eth_dpbp.c
++
++#
++# Export include files
++#
++SYMLINK-y-include +=
++
++# this lib depends upon:
++DEPDIRS-y += lib/librte_eal
++include $(RTE_SDK)/mk/rte.lib.mk
+diff --git a/drivers/net/dpaa2/dpaa2_logs.h b/drivers/net/dpaa2/dpaa2_logs.h
+new file mode 100644
+index 0000000..319786a
+--- /dev/null
++++ b/drivers/net/dpaa2/dpaa2_logs.h
+@@ -0,0 +1,77 @@
++/*-
++ * BSD LICENSE
++ *
++ * Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions
++ * are met:
++ *
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in
++ * the documentation and/or other materials provided with the
++ * distribution.
++ * * Neither the name of Freescale Semiconductor, Inc nor the names of its
++ * contributors may be used to endorse or promote products derived
++ * from this software without specific prior written permission.
++ *
++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
++ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
++ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
++ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
++ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
++ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
++ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
++ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
++ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
++ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++
++#ifndef _DPAA2_LOGS_H_
++#define _DPAA2_LOGS_H_
++
++#define PMD_INIT_LOG(level, fmt, args...) \
++ RTE_LOG(level, PMD, "%s(): " fmt "\n", __func__, ##args)
++
++#ifdef RTE_LIBRTE_DPAA2_DEBUG_INIT
++#define PMD_INIT_FUNC_TRACE() PMD_INIT_LOG(DEBUG, " >>")
++#else
++#define PMD_INIT_FUNC_TRACE() do { } while (0)
++#endif
++
++#ifdef RTE_LIBRTE_DPAA2_DEBUG_RX
++#define PMD_RX_LOG(level, fmt, args...) \
++ RTE_LOG(level, PMD, "%s(): " fmt "\n", __func__, ## args)
++#else
++#define PMD_RX_LOG(level, fmt, args...) do { } while(0)
++#endif
++
++#ifdef RTE_LIBRTE_DPAA2_DEBUG_TX
++#define PMD_TX_LOG(level, fmt, args...) \
++ RTE_LOG(level, PMD, "%s(): " fmt "\n", __func__, ## args)
++#else
++#define PMD_TX_LOG(level, fmt, args...) do { } while(0)
++#endif
++
++#ifdef RTE_LIBRTE_DPAA2_DEBUG_TX_FREE
++#define PMD_TX_FREE_LOG(level, fmt, args...) \
++ RTE_LOG(level, PMD, "%s(): " fmt "\n", __func__, ## args)
++#else
++#define PMD_TX_FREE_LOG(level, fmt, args...) do { } while(0)
++#endif
++
++#ifdef RTE_LIBRTE_DPAA2_DEBUG_DRIVER
++#define PMD_DRV_LOG_RAW(level, fmt, args...) \
++ RTE_LOG(level, PMD, "%s(): " fmt, __func__, ## args)
++#else
++#define PMD_DRV_LOG_RAW(level, fmt, args...) do { } while (0)
++#endif
++
++#define PMD_DRV_LOG(level, fmt, args...) \
++ PMD_DRV_LOG_RAW(level, fmt "\n", ## args)
++
++#endif /* _DPAA2_LOGS_H_ */
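For context, the log macros above are intended to be used from the PMD code paths added later in this patch. The snippet below is an illustrative sketch only, not part of the patch (the function and its parameters are hypothetical); it shows how the compile-time debug switches from the defconfig gate each macro.

/* Illustrative use of the dpaa2_logs.h macros (hypothetical function,
 * not part of the patch). PMD_INIT_FUNC_TRACE() compiles to nothing
 * unless RTE_LIBRTE_DPAA2_DEBUG_INIT is enabled; PMD_DRV_LOG() likewise
 * depends on RTE_LIBRTE_DPAA2_DEBUG_DRIVER. */
#include <rte_log.h>
#include "dpaa2_logs.h"

static int
dpaa2_example_configure(int dev_id, int nb_rx_queues)
{
	PMD_INIT_FUNC_TRACE();

	if (nb_rx_queues <= 0) {
		PMD_INIT_LOG(ERR, "invalid rx queue count %d", nb_rx_queues);
		return -1;
	}

	PMD_DRV_LOG(DEBUG, "device %d: configuring %d rx queues",
		    dev_id, nb_rx_queues);
	return 0;
}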
+diff --git a/drivers/net/dpaa2/mc/dpaiop.c b/drivers/net/dpaa2/mc/dpaiop.c
+new file mode 100644
+index 0000000..7c1ecff
+--- /dev/null
++++ b/drivers/net/dpaa2/mc/dpaiop.c
+@@ -0,0 +1,457 @@
++/* Copyright 2013-2015 Freescale Semiconductor Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of the above-listed copyright holders nor the
++ * names of any contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
++ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
++ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
++ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
++ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
++ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
++ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
++ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
++ * POSSIBILITY OF SUCH DAMAGE.
++ */
++#include <fsl_mc_sys.h>
++#include <fsl_mc_cmd.h>
++#include <fsl_dpaiop.h>
++#include <fsl_dpaiop_cmd.h>
++
++int dpaiop_open(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ int dpaiop_id,
++ uint16_t *token)
++{
++ struct mc_command cmd = { 0 };
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPAIOP_CMDID_OPEN,
++ cmd_flags,
++ 0);
++ DPAIOP_CMD_OPEN(cmd, dpaiop_id);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ *token = MC_CMD_HDR_READ_TOKEN(cmd.header);
++
++ return err;
++}
++
++int dpaiop_close(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPAIOP_CMDID_CLOSE, cmd_flags,
++ token);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpaiop_create(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ const struct dpaiop_cfg *cfg,
++ uint16_t *token)
++{
++ struct mc_command cmd = { 0 };
++ int err;
++
++ (void)(cfg); /* unused */
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPAIOP_CMDID_CREATE,
++ cmd_flags,
++ 0);
++ DPAIOP_CMD_CREATE(cmd, cfg);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ *token = MC_CMD_HDR_READ_TOKEN(cmd.header);
++
++ return 0;
++}
++
++int dpaiop_destroy(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPAIOP_CMDID_DESTROY,
++ cmd_flags,
++ token);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpaiop_reset(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPAIOP_CMDID_RESET,
++ cmd_flags,
++ token);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpaiop_set_irq(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ struct dpaiop_irq_cfg *irq_cfg)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPAIOP_CMDID_SET_IRQ,
++ cmd_flags,
++ token);
++
++ DPAIOP_CMD_SET_IRQ(cmd, irq_index, irq_cfg);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpaiop_get_irq(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ int *type,
++ struct dpaiop_irq_cfg *irq_cfg)
++{
++ struct mc_command cmd = { 0 };
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPAIOP_CMDID_GET_IRQ,
++ cmd_flags,
++ token);
++
++ DPAIOP_CMD_GET_IRQ(cmd, irq_index);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ DPAIOP_RSP_GET_IRQ(cmd, *type, irq_cfg);
++
++ return 0;
++}
++
++int dpaiop_set_irq_enable(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ uint8_t en)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPAIOP_CMDID_SET_IRQ_ENABLE,
++ cmd_flags,
++ token);
++
++ DPAIOP_CMD_SET_IRQ_ENABLE(cmd, irq_index, en);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpaiop_get_irq_enable(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ uint8_t *en)
++{
++ struct mc_command cmd = { 0 };
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPAIOP_CMDID_GET_IRQ_ENABLE,
++ cmd_flags,
++ token);
++
++ DPAIOP_CMD_GET_IRQ_ENABLE(cmd, irq_index);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ DPAIOP_RSP_GET_IRQ_ENABLE(cmd, *en);
++
++ return 0;
++}
++
++int dpaiop_set_irq_mask(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ uint32_t mask)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPAIOP_CMDID_SET_IRQ_MASK,
++ cmd_flags,
++ token);
++
++ DPAIOP_CMD_SET_IRQ_MASK(cmd, irq_index, mask);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpaiop_get_irq_mask(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ uint32_t *mask)
++{
++ struct mc_command cmd = { 0 };
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPAIOP_CMDID_GET_IRQ_MASK,
++ cmd_flags,
++ token);
++
++ DPAIOP_CMD_GET_IRQ_MASK(cmd, irq_index);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ DPAIOP_RSP_GET_IRQ_MASK(cmd, *mask);
++
++ return 0;
++}
++
++int dpaiop_get_irq_status(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ uint32_t *status)
++{
++ struct mc_command cmd = { 0 };
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPAIOP_CMDID_GET_IRQ_STATUS,
++ cmd_flags,
++ token);
++ DPAIOP_CMD_GET_IRQ_STATUS(cmd, irq_index, *status);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ DPAIOP_RSP_GET_IRQ_STATUS(cmd, *status);
++
++ return 0;
++}
++
++int dpaiop_clear_irq_status(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ uint32_t status)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPAIOP_CMDID_CLEAR_IRQ_STATUS,
++ cmd_flags,
++ token);
++ DPAIOP_CMD_CLEAR_IRQ_STATUS(cmd, irq_index, status);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpaiop_get_attributes(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ struct dpaiop_attr *attr)
++{
++ struct mc_command cmd = { 0 };
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPAIOP_CMDID_GET_ATTR,
++ cmd_flags,
++ token);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ DPAIOP_RSP_GET_ATTRIBUTES(cmd, attr);
++
++ return 0;
++}
++
++int dpaiop_load(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ struct dpaiop_load_cfg *cfg)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPAIOP_CMDID_LOAD,
++ cmd_flags,
++ token);
++ DPAIOP_CMD_LOAD(cmd, cfg);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpaiop_run(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ const struct dpaiop_run_cfg *cfg)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPAIOP_CMDID_RUN,
++ cmd_flags,
++ token);
++ DPAIOP_CMD_RUN(cmd, cfg);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpaiop_get_sl_version(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ struct dpaiop_sl_version *version)
++{
++ struct mc_command cmd = { 0 };
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPAIOP_CMDID_GET_SL_VERSION,
++ cmd_flags,
++ token);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ DPAIOP_RSP_GET_SL_VERSION(cmd, version);
++
++ return 0;
++}
++
++int dpaiop_get_state(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint32_t *state)
++{
++ struct mc_command cmd = { 0 };
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPAIOP_CMDID_GET_STATE,
++ cmd_flags,
++ token);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ DPAIOP_RSP_GET_STATE(cmd, *state);
++
++ return 0;
++}
++
++int dpaiop_set_time_of_day(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint64_t time_of_day)
++{
++ struct mc_command cmd = { 0 };
++
++ cmd.header = mc_encode_cmd_header(DPAIOP_CMDID_SET_TIME_OF_DAY,
++ cmd_flags,
++ token);
++
++ DPAIOP_CMD_SET_TIME_OF_DAY(cmd, time_of_day);
++
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpaiop_get_time_of_day(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint64_t *time_of_day)
++{
++ struct mc_command cmd = { 0 };
++ int err;
++
++ cmd.header = mc_encode_cmd_header(DPAIOP_CMDID_GET_TIME_OF_DAY,
++ cmd_flags,
++ token);
++
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ DPAIOP_RSP_GET_TIME_OF_DAY(cmd, *time_of_day);
++
++ return 0;
++}
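Every MC object API in this patch follows the same request/response pattern visible above: encode a command header with mc_encode_cmd_header(), optionally fill in parameters with a DP*_CMD_* macro, send it with mc_send_command(), then decode the reply with a DP*_RSP_* macro or MC_CMD_HDR_READ_TOKEN(). As a minimal sketch of a caller (not part of the patch; the cmd_flags value of 0 and the trimmed error handling are assumptions), the dpaiop object would typically be driven like this:

/* Minimal sketch of driving the dpaiop API above (hypothetical caller). */
#include <fsl_mc_sys.h>
#include <fsl_dpaiop.h>

static int
dpaiop_example_query(struct fsl_mc_io *mc_io, int dpaiop_id)
{
	struct dpaiop_attr attr;
	uint32_t state;
	uint16_t token;
	int err;

	/* open the object to obtain an authentication token */
	err = dpaiop_open(mc_io, 0 /* cmd_flags */, dpaiop_id, &token);
	if (err)
		return err;

	/* issue commands against the token */
	err = dpaiop_get_attributes(mc_io, 0, token, &attr);
	if (!err)
		err = dpaiop_get_state(mc_io, 0, token, &state);

	/* always release the token */
	dpaiop_close(mc_io, 0, token);
	return err;
}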
+diff --git a/drivers/net/dpaa2/mc/dpbp.c b/drivers/net/dpaa2/mc/dpbp.c
+new file mode 100644
+index 0000000..87899b8
+--- /dev/null
++++ b/drivers/net/dpaa2/mc/dpbp.c
+@@ -0,0 +1,432 @@
++/* Copyright 2013-2015 Freescale Semiconductor Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of the above-listed copyright holders nor the
++ * names of any contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
++ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
++ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
++ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
++ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
++ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
++ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
++ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
++ * POSSIBILITY OF SUCH DAMAGE.
++ */
++#include <fsl_mc_sys.h>
++#include <fsl_mc_cmd.h>
++#include <fsl_dpbp.h>
++#include <fsl_dpbp_cmd.h>
++
++int dpbp_open(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ int dpbp_id,
++ uint16_t *token)
++{
++ struct mc_command cmd = { 0 };
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPBP_CMDID_OPEN,
++ cmd_flags,
++ 0);
++ DPBP_CMD_OPEN(cmd, dpbp_id);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ *token = MC_CMD_HDR_READ_TOKEN(cmd.header);
++
++ return err;
++}
++
++int dpbp_close(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPBP_CMDID_CLOSE, cmd_flags,
++ token);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpbp_create(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ const struct dpbp_cfg *cfg,
++ uint16_t *token)
++{
++ struct mc_command cmd = { 0 };
++ int err;
++
++ (void)(cfg); /* unused */
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPBP_CMDID_CREATE,
++ cmd_flags,
++ 0);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ *token = MC_CMD_HDR_READ_TOKEN(cmd.header);
++
++ return 0;
++}
++
++int dpbp_destroy(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPBP_CMDID_DESTROY,
++ cmd_flags,
++ token);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpbp_enable(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPBP_CMDID_ENABLE, cmd_flags,
++ token);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpbp_disable(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPBP_CMDID_DISABLE,
++ cmd_flags,
++ token);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpbp_is_enabled(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ int *en)
++{
++ struct mc_command cmd = { 0 };
++ int err;
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPBP_CMDID_IS_ENABLED, cmd_flags,
++ token);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ DPBP_RSP_IS_ENABLED(cmd, *en);
++
++ return 0;
++}
++
++int dpbp_reset(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPBP_CMDID_RESET,
++ cmd_flags,
++ token);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpbp_set_irq(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ struct dpbp_irq_cfg *irq_cfg)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPBP_CMDID_SET_IRQ,
++ cmd_flags,
++ token);
++
++ DPBP_CMD_SET_IRQ(cmd, irq_index, irq_cfg);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpbp_get_irq(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ int *type,
++ struct dpbp_irq_cfg *irq_cfg)
++{
++ struct mc_command cmd = { 0 };
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPBP_CMDID_GET_IRQ,
++ cmd_flags,
++ token);
++
++ DPBP_CMD_GET_IRQ(cmd, irq_index);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ DPBP_RSP_GET_IRQ(cmd, *type, irq_cfg);
++
++ return 0;
++}
++
++int dpbp_set_irq_enable(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ uint8_t en)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPBP_CMDID_SET_IRQ_ENABLE,
++ cmd_flags,
++ token);
++
++ DPBP_CMD_SET_IRQ_ENABLE(cmd, irq_index, en);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpbp_get_irq_enable(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ uint8_t *en)
++{
++ struct mc_command cmd = { 0 };
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPBP_CMDID_GET_IRQ_ENABLE,
++ cmd_flags,
++ token);
++
++ DPBP_CMD_GET_IRQ_ENABLE(cmd, irq_index);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ DPBP_RSP_GET_IRQ_ENABLE(cmd, *en);
++
++ return 0;
++}
++
++int dpbp_set_irq_mask(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ uint32_t mask)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPBP_CMDID_SET_IRQ_MASK,
++ cmd_flags,
++ token);
++
++ DPBP_CMD_SET_IRQ_MASK(cmd, irq_index, mask);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpbp_get_irq_mask(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ uint32_t *mask)
++{
++ struct mc_command cmd = { 0 };
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPBP_CMDID_GET_IRQ_MASK,
++ cmd_flags,
++ token);
++
++ DPBP_CMD_GET_IRQ_MASK(cmd, irq_index);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ DPBP_RSP_GET_IRQ_MASK(cmd, *mask);
++
++ return 0;
++}
++
++int dpbp_get_irq_status(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ uint32_t *status)
++{
++ struct mc_command cmd = { 0 };
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPBP_CMDID_GET_IRQ_STATUS,
++ cmd_flags,
++ token);
++
++ DPBP_CMD_GET_IRQ_STATUS(cmd, irq_index, *status);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ DPBP_RSP_GET_IRQ_STATUS(cmd, *status);
++
++ return 0;
++}
++
++int dpbp_clear_irq_status(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ uint32_t status)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPBP_CMDID_CLEAR_IRQ_STATUS,
++ cmd_flags,
++ token);
++
++ DPBP_CMD_CLEAR_IRQ_STATUS(cmd, irq_index, status);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpbp_get_attributes(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ struct dpbp_attr *attr)
++{
++ struct mc_command cmd = { 0 };
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPBP_CMDID_GET_ATTR,
++ cmd_flags,
++ token);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ DPBP_RSP_GET_ATTRIBUTES(cmd, attr);
++
++ return 0;
++}
++
++int dpbp_set_notifications(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ struct dpbp_notification_cfg *cfg)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPBP_CMDID_SET_NOTIFICATIONS,
++ cmd_flags,
++ token);
++
++ DPBP_CMD_SET_NOTIFICATIONS(cmd, cfg);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpbp_get_notifications(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ struct dpbp_notification_cfg *cfg)
++{
++ struct mc_command cmd = { 0 };
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPBP_CMDID_GET_NOTIFICATIONS,
++ cmd_flags,
++ token);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ DPBP_CMD_GET_NOTIFICATIONS(cmd, cfg);
++
++ return 0;
++}
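The dpbp object is the buffer pool consumed by the mempool/Rx path added later in this patch (rte_eth_dpbp.c). Below is a minimal sketch of its lifecycle from a caller's point of view, under the same assumptions as above (cmd_flags of 0, error handling trimmed; the bpid field is assumed to be exposed by struct dpbp_attr in fsl_dpbp.h, which is part of this patch but not shown in this excerpt).

/* Minimal sketch of the dpbp lifecycle (hypothetical caller). */
#include <fsl_mc_sys.h>
#include <fsl_dpbp.h>

static int
dpbp_example_probe(struct fsl_mc_io *mc_io, int dpbp_id, uint16_t *hw_bpid)
{
	struct dpbp_attr attr;
	uint16_t token;
	int err;

	err = dpbp_open(mc_io, 0 /* cmd_flags */, dpbp_id, &token);
	if (err)
		return err;

	/* the pool must be enabled before buffers can be acquired/released */
	err = dpbp_enable(mc_io, 0, token);
	if (err)
		goto close;

	err = dpbp_get_attributes(mc_io, 0, token, &attr);
	if (!err)
		*hw_bpid = attr.bpid;	/* hardware buffer pool id, per fsl_dpbp.h */

	dpbp_disable(mc_io, 0, token);
close:
	dpbp_close(mc_io, 0, token);
	return err;
}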
+diff --git a/drivers/net/dpaa2/mc/dpci.c b/drivers/net/dpaa2/mc/dpci.c
+new file mode 100644
+index 0000000..2ec02a1
+--- /dev/null
++++ b/drivers/net/dpaa2/mc/dpci.c
+@@ -0,0 +1,501 @@
++/* Copyright 2013-2015 Freescale Semiconductor Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of the above-listed copyright holders nor the
++ * names of any contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
++ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
++ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
++ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
++ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
++ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
++ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
++ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
++ * POSSIBILITY OF SUCH DAMAGE.
++ */
++#include <fsl_mc_sys.h>
++#include <fsl_mc_cmd.h>
++#include <fsl_dpci.h>
++#include <fsl_dpci_cmd.h>
++
++int dpci_open(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ int dpci_id,
++ uint16_t *token)
++{
++ struct mc_command cmd = { 0 };
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPCI_CMDID_OPEN,
++ cmd_flags,
++ 0);
++ DPCI_CMD_OPEN(cmd, dpci_id);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ *token = MC_CMD_HDR_READ_TOKEN(cmd.header);
++
++ return 0;
++}
++
++int dpci_close(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPCI_CMDID_CLOSE,
++ cmd_flags,
++ token);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpci_create(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ const struct dpci_cfg *cfg,
++ uint16_t *token)
++{
++ struct mc_command cmd = { 0 };
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPCI_CMDID_CREATE,
++ cmd_flags,
++ 0);
++ DPCI_CMD_CREATE(cmd, cfg);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ *token = MC_CMD_HDR_READ_TOKEN(cmd.header);
++
++ return 0;
++}
++
++int dpci_destroy(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPCI_CMDID_DESTROY,
++ cmd_flags,
++ token);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpci_enable(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPCI_CMDID_ENABLE,
++ cmd_flags,
++ token);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpci_disable(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPCI_CMDID_DISABLE,
++ cmd_flags,
++ token);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpci_is_enabled(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ int *en)
++{
++ struct mc_command cmd = { 0 };
++ int err;
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPCI_CMDID_IS_ENABLED, cmd_flags,
++ token);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ DPCI_RSP_IS_ENABLED(cmd, *en);
++
++ return 0;
++}
++
++int dpci_reset(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPCI_CMDID_RESET,
++ cmd_flags,
++ token);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpci_set_irq(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ struct dpci_irq_cfg *irq_cfg)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPCI_CMDID_SET_IRQ,
++ cmd_flags,
++ token);
++ DPCI_CMD_SET_IRQ(cmd, irq_index, irq_cfg);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpci_get_irq(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ int *type,
++ struct dpci_irq_cfg *irq_cfg)
++{
++ struct mc_command cmd = { 0 };
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPCI_CMDID_GET_IRQ,
++ cmd_flags,
++ token);
++ DPCI_CMD_GET_IRQ(cmd, irq_index);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ DPCI_RSP_GET_IRQ(cmd, *type, irq_cfg);
++
++ return 0;
++}
++
++int dpci_set_irq_enable(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ uint8_t en)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPCI_CMDID_SET_IRQ_ENABLE,
++ cmd_flags,
++ token);
++ DPCI_CMD_SET_IRQ_ENABLE(cmd, irq_index, en);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpci_get_irq_enable(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ uint8_t *en)
++{
++ struct mc_command cmd = { 0 };
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPCI_CMDID_GET_IRQ_ENABLE,
++ cmd_flags,
++ token);
++ DPCI_CMD_GET_IRQ_ENABLE(cmd, irq_index);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ DPCI_RSP_GET_IRQ_ENABLE(cmd, *en);
++
++ return 0;
++}
++
++int dpci_set_irq_mask(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ uint32_t mask)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPCI_CMDID_SET_IRQ_MASK,
++ cmd_flags,
++ token);
++ DPCI_CMD_SET_IRQ_MASK(cmd, irq_index, mask);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpci_get_irq_mask(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ uint32_t *mask)
++{
++ struct mc_command cmd = { 0 };
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPCI_CMDID_GET_IRQ_MASK,
++ cmd_flags,
++ token);
++ DPCI_CMD_GET_IRQ_MASK(cmd, irq_index);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ DPCI_RSP_GET_IRQ_MASK(cmd, *mask);
++
++ return 0;
++}
++
++int dpci_get_irq_status(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ uint32_t *status)
++{
++ struct mc_command cmd = { 0 };
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPCI_CMDID_GET_IRQ_STATUS,
++ cmd_flags,
++ token);
++ DPCI_CMD_GET_IRQ_STATUS(cmd, irq_index, *status);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ DPCI_RSP_GET_IRQ_STATUS(cmd, *status);
++
++ return 0;
++}
++
++int dpci_clear_irq_status(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ uint32_t status)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPCI_CMDID_CLEAR_IRQ_STATUS,
++ cmd_flags,
++ token);
++ DPCI_CMD_CLEAR_IRQ_STATUS(cmd, irq_index, status);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpci_get_attributes(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ struct dpci_attr *attr)
++{
++ struct mc_command cmd = { 0 };
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPCI_CMDID_GET_ATTR,
++ cmd_flags,
++ token);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ DPCI_RSP_GET_ATTR(cmd, attr);
++
++ return 0;
++}
++
++int dpci_get_peer_attributes(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ struct dpci_peer_attr *attr)
++{
++ struct mc_command cmd = { 0 };
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPCI_CMDID_GET_PEER_ATTR,
++ cmd_flags,
++ token);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ DPCI_RSP_GET_PEER_ATTR(cmd, attr);
++
++ return 0;
++}
++
++int dpci_get_link_state(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ int *up)
++{
++ struct mc_command cmd = { 0 };
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPCI_CMDID_GET_LINK_STATE,
++ cmd_flags,
++ token);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ DPCI_RSP_GET_LINK_STATE(cmd, *up);
++
++ return 0;
++}
++
++int dpci_set_rx_queue(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t priority,
++ const struct dpci_rx_queue_cfg *cfg)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPCI_CMDID_SET_RX_QUEUE,
++ cmd_flags,
++ token);
++ DPCI_CMD_SET_RX_QUEUE(cmd, priority, cfg);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpci_get_rx_queue(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t priority,
++ struct dpci_rx_queue_attr *attr)
++{
++ struct mc_command cmd = { 0 };
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPCI_CMDID_GET_RX_QUEUE,
++ cmd_flags,
++ token);
++ DPCI_CMD_GET_RX_QUEUE(cmd, priority);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ DPCI_RSP_GET_RX_QUEUE(cmd, attr);
++
++ return 0;
++}
++
++int dpci_get_tx_queue(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t priority,
++ struct dpci_tx_queue_attr *attr)
++{
++ struct mc_command cmd = { 0 };
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPCI_CMDID_GET_TX_QUEUE,
++ cmd_flags,
++ token);
++ DPCI_CMD_GET_TX_QUEUE(cmd, priority);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ DPCI_RSP_GET_TX_QUEUE(cmd, attr);
++
++ return 0;
++}
+diff --git a/drivers/net/dpaa2/mc/dpcon.c b/drivers/net/dpaa2/mc/dpcon.c
+new file mode 100644
+index 0000000..396303d
+--- /dev/null
++++ b/drivers/net/dpaa2/mc/dpcon.c
+@@ -0,0 +1,401 @@
++/* Copyright 2013-2015 Freescale Semiconductor Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of the above-listed copyright holders nor the
++ * names of any contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
++ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
++ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
++ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
++ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
++ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
++ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
++ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
++ * POSSIBILITY OF SUCH DAMAGE.
++ */
++#include <fsl_mc_sys.h>
++#include <fsl_mc_cmd.h>
++#include <fsl_dpcon.h>
++#include <fsl_dpcon_cmd.h>
++
++int dpcon_open(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ int dpcon_id,
++ uint16_t *token)
++{
++ struct mc_command cmd = { 0 };
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPCON_CMDID_OPEN,
++ cmd_flags,
++ 0);
++ DPCON_CMD_OPEN(cmd, dpcon_id);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ *token = MC_CMD_HDR_READ_TOKEN(cmd.header);
++
++ return 0;
++}
++
++int dpcon_close(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPCON_CMDID_CLOSE,
++ cmd_flags,
++ token);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpcon_create(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ const struct dpcon_cfg *cfg,
++ uint16_t *token)
++{
++ struct mc_command cmd = { 0 };
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPCON_CMDID_CREATE,
++ cmd_flags,
++ 0);
++ DPCON_CMD_CREATE(cmd, cfg);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ *token = MC_CMD_HDR_READ_TOKEN(cmd.header);
++
++ return 0;
++}
++
++int dpcon_destroy(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPCON_CMDID_DESTROY,
++ cmd_flags,
++ token);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpcon_enable(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPCON_CMDID_ENABLE,
++ cmd_flags,
++ token);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpcon_disable(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPCON_CMDID_DISABLE,
++ cmd_flags,
++ token);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpcon_is_enabled(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ int *en)
++{
++ struct mc_command cmd = { 0 };
++ int err;
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPCON_CMDID_IS_ENABLED,
++ cmd_flags,
++ token);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ DPCON_RSP_IS_ENABLED(cmd, *en);
++
++ return 0;
++}
++
++int dpcon_reset(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPCON_CMDID_RESET,
++ cmd_flags, token);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpcon_set_irq(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ struct dpcon_irq_cfg *irq_cfg)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPCON_CMDID_SET_IRQ,
++ cmd_flags,
++ token);
++ DPCON_CMD_SET_IRQ(cmd, irq_index, irq_cfg);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpcon_get_irq(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ int *type,
++ struct dpcon_irq_cfg *irq_cfg)
++{
++ struct mc_command cmd = { 0 };
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPCON_CMDID_GET_IRQ,
++ cmd_flags,
++ token);
++ DPCON_CMD_GET_IRQ(cmd, irq_index);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ DPCON_RSP_GET_IRQ(cmd, *type, irq_cfg);
++
++ return 0;
++}
++
++int dpcon_set_irq_enable(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ uint8_t en)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPCON_CMDID_SET_IRQ_ENABLE,
++ cmd_flags,
++ token);
++ DPCON_CMD_SET_IRQ_ENABLE(cmd, irq_index, en);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpcon_get_irq_enable(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ uint8_t *en)
++{
++ struct mc_command cmd = { 0 };
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPCON_CMDID_GET_IRQ_ENABLE,
++ cmd_flags,
++ token);
++ DPCON_CMD_GET_IRQ_ENABLE(cmd, irq_index);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ DPCON_RSP_GET_IRQ_ENABLE(cmd, *en);
++
++ return 0;
++}
++
++int dpcon_set_irq_mask(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ uint32_t mask)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPCON_CMDID_SET_IRQ_MASK,
++ cmd_flags,
++ token);
++ DPCON_CMD_SET_IRQ_MASK(cmd, irq_index, mask);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpcon_get_irq_mask(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ uint32_t *mask)
++{
++ struct mc_command cmd = { 0 };
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPCON_CMDID_GET_IRQ_MASK,
++ cmd_flags,
++ token);
++ DPCON_CMD_GET_IRQ_MASK(cmd, irq_index);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ DPCON_RSP_GET_IRQ_MASK(cmd, *mask);
++
++ return 0;
++}
++
++int dpcon_get_irq_status(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ uint32_t *status)
++{
++ struct mc_command cmd = { 0 };
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPCON_CMDID_GET_IRQ_STATUS,
++ cmd_flags,
++ token);
++ DPCON_CMD_GET_IRQ_STATUS(cmd, irq_index, *status);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ DPCON_RSP_GET_IRQ_STATUS(cmd, *status);
++
++ return 0;
++}
++
++int dpcon_clear_irq_status(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ uint32_t status)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPCON_CMDID_CLEAR_IRQ_STATUS,
++ cmd_flags,
++ token);
++ DPCON_CMD_CLEAR_IRQ_STATUS(cmd, irq_index, status);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpcon_get_attributes(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ struct dpcon_attr *attr)
++{
++ struct mc_command cmd = { 0 };
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPCON_CMDID_GET_ATTR,
++ cmd_flags,
++ token);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ DPCON_RSP_GET_ATTR(cmd, attr);
++
++ return 0;
++}
++
++int dpcon_set_notification(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ struct dpcon_notification_cfg *cfg)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPCON_CMDID_SET_NOTIFICATION,
++ cmd_flags,
++ token);
++ DPCON_CMD_SET_NOTIFICATION(cmd, cfg);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
+diff --git a/drivers/net/dpaa2/mc/dpdbg.c b/drivers/net/dpaa2/mc/dpdbg.c
+new file mode 100644
+index 0000000..6f2a08d
+--- /dev/null
++++ b/drivers/net/dpaa2/mc/dpdbg.c
+@@ -0,0 +1,547 @@
++/* Copyright 2013-2015 Freescale Semiconductor Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of the above-listed copyright holders nor the
++ * names of any contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
++ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
++ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
++ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
++ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
++ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
++ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
++ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
++ * POSSIBILITY OF SUCH DAMAGE.
++ */
++#include <fsl_mc_sys.h>
++#include <fsl_mc_cmd.h>
++#include <fsl_dpdbg.h>
++#include <fsl_dpdbg_cmd.h>
++
++int dpdbg_open(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ int dpdbg_id,
++ uint16_t *token)
++{
++ struct mc_command cmd = { 0 };
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPDBG_CMDID_OPEN,
++ cmd_flags,
++ 0);
++ DPDBG_CMD_OPEN(cmd, dpdbg_id);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ *token = MC_CMD_HDR_READ_TOKEN(cmd.header);
++
++	return 0;
++}
++
++int dpdbg_close(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPDBG_CMDID_CLOSE, cmd_flags,
++ token);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpdbg_get_attributes(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ struct dpdbg_attr *attr)
++{
++ struct mc_command cmd = { 0 };
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPDBG_CMDID_GET_ATTR,
++ cmd_flags,
++ token);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ DPDBG_RSP_GET_ATTRIBUTES(cmd, attr);
++
++ return 0;
++}
++
++int dpdbg_get_dpni_info(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ int dpni_id,
++ struct dpdbg_dpni_info *info)
++{
++ struct mc_command cmd = { 0 };
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPDBG_CMDID_GET_DPNI_INFO,
++ cmd_flags,
++ token);
++ DPDBG_CMD_GET_DPNI_INFO(cmd, dpni_id);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ DPDBG_RSP_GET_DPNI_INFO(cmd, info);
++
++ return 0;
++}
++
++int dpdbg_get_dpni_priv_tx_conf_fqid(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ int dpni_id,
++ uint8_t sender_id,
++ uint32_t *fqid)
++{
++ struct mc_command cmd = { 0 };
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(
++ DPDBG_CMDID_GET_DPNI_PRIV_TX_CONF_FQID,
++ cmd_flags,
++ token);
++ DPDBG_CMD_GET_DPNI_PRIV_TX_CONF_FQID(cmd, dpni_id, sender_id);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ DPDBG_RSP_GET_DPNI_PRIV_TX_CONF_FQID(cmd, *fqid);
++
++ return 0;
++}
++
++int dpdbg_get_dpcon_info(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ int dpcon_id,
++ struct dpdbg_dpcon_info *info)
++{
++ struct mc_command cmd = { 0 };
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPDBG_CMDID_GET_DPCON_INFO,
++ cmd_flags,
++ token);
++ DPDBG_CMD_GET_DPCON_INFO(cmd, dpcon_id);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ DPDBG_RSP_GET_DPCON_INFO(cmd, info);
++
++ return 0;
++}
++
++int dpdbg_get_dpbp_info(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ int dpbp_id,
++ struct dpdbg_dpbp_info *info)
++{
++ struct mc_command cmd = { 0 };
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPDBG_CMDID_GET_DPBP_INFO,
++ cmd_flags,
++ token);
++ DPDBG_CMD_GET_DPBP_INFO(cmd, dpbp_id);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ DPDBG_RSP_GET_DPBP_INFO(cmd, info);
++
++ return 0;
++}
++
++int dpdbg_get_dpci_fqid(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ int dpci_id,
++ uint8_t priority,
++ uint32_t *fqid)
++{
++ struct mc_command cmd = { 0 };
++ int err;
++
++ /* prepare command */
++	cmd.header = mc_encode_cmd_header(DPDBG_CMDID_GET_DPCI_FQID,
++ cmd_flags,
++ token);
++ DPDBG_CMD_GET_DPCI_FQID(cmd, dpci_id, priority);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ DPDBG_RSP_GET_DPCI_FQID(cmd, *fqid);
++
++ return 0;
++}
++
++int dpdbg_prepare_ctlu_global_rule(struct dpkg_profile_cfg *dpkg_rule,
++ uint8_t *rule_buf)
++{
++ int i, j;
++ int offset = 0;
++ int param = 1;
++ uint64_t *params = (uint64_t *)rule_buf;
++
++ if (!rule_buf || !dpkg_rule)
++ return -EINVAL;
++
++ params[0] |= mc_enc(0, 8, dpkg_rule->num_extracts);
++ params[0] = cpu_to_le64(params[0]);
++
++ if (dpkg_rule->num_extracts >= DPKG_MAX_NUM_OF_EXTRACTS)
++ return -EINVAL;
++
++ for (i = 0; i < dpkg_rule->num_extracts; i++) {
++ switch (dpkg_rule->extracts[i].type) {
++ case DPKG_EXTRACT_FROM_HDR:
++ params[param] |= mc_enc(0, 8,
++ dpkg_rule->extracts[i].extract.from_hdr.prot);
++ params[param] |= mc_enc(8, 4,
++ dpkg_rule->extracts[i].extract.from_hdr.type);
++ params[param] |= mc_enc(16, 8,
++ dpkg_rule->extracts[i].extract.from_hdr.size);
++ params[param] |= mc_enc(24, 8,
++ dpkg_rule->extracts[i].extract.from_hdr.offset);
++ params[param] |= mc_enc(32, 32,
++ dpkg_rule->extracts[i].extract.from_hdr.field);
++ params[param] = cpu_to_le64(params[param]);
++ param++;
++ params[param] |= mc_enc(0, 8,
++ dpkg_rule->extracts[i].extract.
++ from_hdr.hdr_index);
++ break;
++ case DPKG_EXTRACT_FROM_DATA:
++ params[param] |= mc_enc(16, 8,
++ dpkg_rule->extracts[i].extract.from_data.size);
++ params[param] |= mc_enc(24, 8,
++ dpkg_rule->extracts[i].extract.
++ from_data.offset);
++ params[param] = cpu_to_le64(params[param]);
++ param++;
++ break;
++ case DPKG_EXTRACT_FROM_PARSE:
++ params[param] |= mc_enc(16, 8,
++ dpkg_rule->extracts[i].extract.from_parse.size);
++ params[param] |= mc_enc(24, 8,
++ dpkg_rule->extracts[i].extract.
++ from_parse.offset);
++ params[param] = cpu_to_le64(params[param]);
++ param++;
++ break;
++ default:
++ return -EINVAL;
++ }
++ params[param] |= mc_enc(
++ 24, 8, dpkg_rule->extracts[i].num_of_byte_masks);
++ params[param] |= mc_enc(32, 4, dpkg_rule->extracts[i].type);
++ params[param] = cpu_to_le64(params[param]);
++ param++;
++ for (offset = 0, j = 0;
++ j < DPKG_NUM_OF_MASKS;
++ offset += 16, j++) {
++ params[param] |= mc_enc(
++ (offset), 8,
++ dpkg_rule->extracts[i].masks[j].mask);
++ params[param] |= mc_enc(
++ (offset + 8), 8,
++ dpkg_rule->extracts[i].masks[j].offset);
++ }
++ params[param] = cpu_to_le64(params[param]);
++ param++;
++ }
++ return 0;
++}
++
++int dpdbg_set_ctlu_global_marking(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t marking,
++ struct dpdbg_rule_cfg *cfg)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPDBG_CMDID_SET_CTLU_GLOBAL_MARKING,
++ cmd_flags,
++ token);
++ DPDBG_CMD_SET_CTLU_GLOBAL_MARKING(cmd, marking, cfg);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpdbg_set_dpni_rx_marking(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ int dpni_id,
++ struct dpdbg_dpni_rx_marking_cfg *cfg)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPDBG_CMDID_SET_DPNI_RX_MARKING,
++ cmd_flags,
++ token);
++ DPDBG_CMD_SET_DPNI_RX_MARKING(cmd, dpni_id, cfg);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpdbg_set_dpni_tx_conf_marking(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ int dpni_id,
++ uint16_t sender_id,
++ uint8_t marking)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPDBG_CMDID_SET_DPNI_TX_CONF_MARKING,
++ cmd_flags,
++ token);
++ DPDBG_CMD_SET_DPNI_TX_CONF_MARKING(cmd, dpni_id, sender_id, marking);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpdbg_set_dpio_marking(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ int dpio_id,
++ uint8_t marking)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPDBG_CMDID_SET_DPIO_MARKING,
++ cmd_flags,
++ token);
++ DPDBG_CMD_SET_DPIO_MARKING(cmd, dpio_id, marking);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpdbg_set_ctlu_global_trace(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ struct dpdbg_rule_cfg *cfg)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPDBG_CMDID_SET_CTLU_GLOBAL_TRACE,
++ cmd_flags,
++ token);
++ DPDBG_CMD_SET_CTLU_GLOBAL_TRACE(cmd, cfg);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpdbg_set_dpio_trace(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ int dpio_id,
++ struct dpdbg_dpio_trace_cfg
++ trace_point[DPDBG_NUM_OF_DPIO_TRACE_POINTS])
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPDBG_CMDID_SET_DPIO_TRACE,
++ cmd_flags,
++ token);
++ DPDBG_CMD_SET_DPIO_TRACE(cmd, dpio_id, trace_point);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpdbg_set_dpni_rx_trace(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ int dpni_id,
++ struct dpdbg_dpni_rx_trace_cfg *trace_cfg)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPDBG_CMDID_SET_DPNI_RX_TRACE,
++ cmd_flags,
++ token);
++ DPDBG_CMD_SET_DPNI_RX_TRACE(cmd, dpni_id, trace_cfg);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpdbg_set_dpni_tx_trace(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ int dpni_id,
++ uint16_t sender_id,
++ struct dpdbg_dpni_tx_trace_cfg *trace_cfg)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPDBG_CMDID_SET_DPNI_TX_TRACE,
++ cmd_flags,
++ token);
++ DPDBG_CMD_SET_DPNI_TX_TRACE(cmd, dpni_id, sender_id, trace_cfg);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpdbg_set_dpcon_trace(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ int dpcon_id,
++ struct dpdbg_dpcon_trace_cfg
++ trace_point[DPDBG_NUM_OF_DPCON_TRACE_POINTS])
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPDBG_CMDID_SET_DPCON_TRACE,
++ cmd_flags,
++ token);
++ DPDBG_CMD_SET_DPCON_TRACE(cmd, dpcon_id, trace_point);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpdbg_set_dpseci_trace(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ int dpseci_id,
++ struct dpdbg_dpseci_trace_cfg
++ trace_point[DPDBG_NUM_OF_DPSECI_TRACE_POINTS])
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPDBG_CMDID_SET_DPSECI_TRACE,
++ cmd_flags,
++ token);
++ DPDBG_CMD_SET_DPSECI_TRACE(cmd, dpseci_id, trace_point);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpdbg_get_dpmac_counter(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ int dpmac_id,
++ enum dpmac_counter counter_type,
++ uint64_t *counter)
++{
++ struct mc_command cmd = { 0 };
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPDBG_CMDID_GET_DPMAC_COUNTER,
++ cmd_flags,
++ token);
++ DPDBG_CMD_GET_DPMAC_COUNTER(cmd, dpmac_id, counter_type);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ DPDBG_RSP_GET_DPMAC_COUNTER(cmd, *counter);
++
++ return 0;
++}
++
++int dpdbg_get_dpni_counter(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ int dpni_id,
++ enum dpni_counter counter_type,
++ uint64_t *counter)
++{
++ struct mc_command cmd = { 0 };
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPDBG_CMDID_GET_DPNI_COUNTER,
++ cmd_flags,
++ token);
++	DPDBG_CMD_GET_DPNI_COUNTER(cmd, dpni_id, counter_type);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ DPDBG_RSP_GET_DPNI_COUNTER(cmd, *counter);
++
++ return 0;
++}
+diff --git a/drivers/net/dpaa2/mc/dpdcei.c b/drivers/net/dpaa2/mc/dpdcei.c
+new file mode 100644
+index 0000000..a5c4c47
+--- /dev/null
++++ b/drivers/net/dpaa2/mc/dpdcei.c
+@@ -0,0 +1,449 @@
++/* Copyright 2013-2015 Freescale Semiconductor Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of the above-listed copyright holders nor the
++ * names of any contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
++ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
++ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
++ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
++ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
++ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
++ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
++ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
++ * POSSIBILITY OF SUCH DAMAGE.
++ */
++#include <fsl_mc_sys.h>
++#include <fsl_mc_cmd.h>
++#include <fsl_dpdcei.h>
++#include <fsl_dpdcei_cmd.h>
++
++int dpdcei_open(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ int dpdcei_id,
++ uint16_t *token)
++{
++ struct mc_command cmd = { 0 };
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPDCEI_CMDID_OPEN,
++ cmd_flags,
++ 0);
++ DPDCEI_CMD_OPEN(cmd, dpdcei_id);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ *token = MC_CMD_HDR_READ_TOKEN(cmd.header);
++
++ return 0;
++}
++
++int dpdcei_close(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPDCEI_CMDID_CLOSE,
++ cmd_flags,
++ token);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpdcei_create(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ const struct dpdcei_cfg *cfg,
++ uint16_t *token)
++{
++ struct mc_command cmd = { 0 };
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPDCEI_CMDID_CREATE,
++ cmd_flags,
++ 0);
++ DPDCEI_CMD_CREATE(cmd, cfg);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ *token = MC_CMD_HDR_READ_TOKEN(cmd.header);
++
++ return 0;
++}
++
++int dpdcei_destroy(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPDCEI_CMDID_DESTROY,
++ cmd_flags,
++ token);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpdcei_enable(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPDCEI_CMDID_ENABLE,
++ cmd_flags,
++ token);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpdcei_disable(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPDCEI_CMDID_DISABLE,
++ cmd_flags,
++ token);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpdcei_is_enabled(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ int *en)
++{
++ struct mc_command cmd = { 0 };
++ int err;
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPDCEI_CMDID_IS_ENABLED,
++ cmd_flags,
++ token);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ DPDCEI_RSP_IS_ENABLED(cmd, *en);
++
++ return 0;
++}
++
++int dpdcei_reset(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPDCEI_CMDID_RESET,
++ cmd_flags,
++ token);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpdcei_get_irq(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ int *type,
++ struct dpdcei_irq_cfg *irq_cfg)
++{
++ struct mc_command cmd = { 0 };
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPDCEI_CMDID_GET_IRQ,
++ cmd_flags,
++ token);
++ DPDCEI_CMD_GET_IRQ(cmd, irq_index);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ DPDCEI_RSP_GET_IRQ(cmd, *type, irq_cfg);
++
++ return 0;
++}
++
++int dpdcei_set_irq(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ struct dpdcei_irq_cfg *irq_cfg)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPDCEI_CMDID_SET_IRQ,
++ cmd_flags,
++ token);
++ DPDCEI_CMD_SET_IRQ(cmd, irq_index, irq_cfg);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpdcei_get_irq_enable(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ uint8_t *en)
++{
++ struct mc_command cmd = { 0 };
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPDCEI_CMDID_GET_IRQ_ENABLE,
++ cmd_flags,
++ token);
++ DPDCEI_CMD_GET_IRQ_ENABLE(cmd, irq_index);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ DPDCEI_RSP_GET_IRQ_ENABLE(cmd, *en);
++
++ return 0;
++}
++
++int dpdcei_set_irq_enable(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ uint8_t en)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPDCEI_CMDID_SET_IRQ_ENABLE,
++ cmd_flags,
++ token);
++ DPDCEI_CMD_SET_IRQ_ENABLE(cmd, irq_index, en);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpdcei_get_irq_mask(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ uint32_t *mask)
++{
++ struct mc_command cmd = { 0 };
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPDCEI_CMDID_GET_IRQ_MASK,
++ cmd_flags,
++ token);
++ DPDCEI_CMD_GET_IRQ_MASK(cmd, irq_index);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ DPDCEI_RSP_GET_IRQ_MASK(cmd, *mask);
++
++ return 0;
++}
++
++int dpdcei_set_irq_mask(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ uint32_t mask)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPDCEI_CMDID_SET_IRQ_MASK,
++ cmd_flags,
++ token);
++ DPDCEI_CMD_SET_IRQ_MASK(cmd, irq_index, mask);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpdcei_get_irq_status(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ uint32_t *status)
++{
++ struct mc_command cmd = { 0 };
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPDCEI_CMDID_GET_IRQ_STATUS,
++ cmd_flags,
++ token);
++ DPDCEI_CMD_GET_IRQ_STATUS(cmd, irq_index, *status);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ DPDCEI_RSP_GET_IRQ_STATUS(cmd, *status);
++
++ return 0;
++}
++
++int dpdcei_clear_irq_status(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ uint32_t status)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPDCEI_CMDID_CLEAR_IRQ_STATUS,
++ cmd_flags,
++ token);
++ DPDCEI_CMD_CLEAR_IRQ_STATUS(cmd, irq_index, status);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpdcei_get_attributes(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ struct dpdcei_attr *attr)
++{
++ struct mc_command cmd = { 0 };
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPDCEI_CMDID_GET_ATTR,
++ cmd_flags,
++ token);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ DPDCEI_RSP_GET_ATTR(cmd, attr);
++
++ return 0;
++}
++
++int dpdcei_set_rx_queue(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ const struct dpdcei_rx_queue_cfg *cfg)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPDCEI_CMDID_SET_RX_QUEUE,
++ cmd_flags,
++ token);
++ DPDCEI_CMD_SET_RX_QUEUE(cmd, cfg);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpdcei_get_rx_queue(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ struct dpdcei_rx_queue_attr *attr)
++{
++ struct mc_command cmd = { 0 };
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPDCEI_CMDID_GET_RX_QUEUE,
++ cmd_flags,
++ token);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ DPDCEI_RSP_GET_RX_QUEUE(cmd, attr);
++
++ return 0;
++}
++
++int dpdcei_get_tx_queue(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ struct dpdcei_tx_queue_attr *attr)
++{
++ struct mc_command cmd = { 0 };
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPDCEI_CMDID_GET_TX_QUEUE,
++ cmd_flags,
++ token);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ DPDCEI_RSP_GET_TX_QUEUE(cmd, attr);
++
++ return 0;
++}
+diff --git a/drivers/net/dpaa2/mc/dpdmai.c b/drivers/net/dpaa2/mc/dpdmai.c
+new file mode 100644
+index 0000000..154d2c6
+--- /dev/null
++++ b/drivers/net/dpaa2/mc/dpdmai.c
+@@ -0,0 +1,452 @@
++/* Copyright 2013-2015 Freescale Semiconductor Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of the above-listed copyright holders nor the
++ * names of any contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
++ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
++ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
++ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
++ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
++ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
++ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
++ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
++ * POSSIBILITY OF SUCH DAMAGE.
++ */
++#include <fsl_mc_sys.h>
++#include <fsl_mc_cmd.h>
++#include <fsl_dpdmai.h>
++#include <fsl_dpdmai_cmd.h>
++
++int dpdmai_open(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ int dpdmai_id,
++ uint16_t *token)
++{
++ struct mc_command cmd = { 0 };
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_OPEN,
++ cmd_flags,
++ 0);
++ DPDMAI_CMD_OPEN(cmd, dpdmai_id);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ *token = MC_CMD_HDR_READ_TOKEN(cmd.header);
++
++ return 0;
++}
++
++int dpdmai_close(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_CLOSE,
++ cmd_flags, token);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpdmai_create(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ const struct dpdmai_cfg *cfg,
++ uint16_t *token)
++{
++ struct mc_command cmd = { 0 };
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_CREATE,
++ cmd_flags,
++ 0);
++ DPDMAI_CMD_CREATE(cmd, cfg);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ *token = MC_CMD_HDR_READ_TOKEN(cmd.header);
++
++ return 0;
++}
++
++int dpdmai_destroy(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_DESTROY,
++ cmd_flags,
++ token);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpdmai_enable(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_ENABLE,
++ cmd_flags,
++ token);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpdmai_disable(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_DISABLE,
++ cmd_flags,
++ token);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpdmai_is_enabled(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ int *en)
++{
++ struct mc_command cmd = { 0 };
++ int err;
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_IS_ENABLED,
++ cmd_flags,
++ token);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ DPDMAI_RSP_IS_ENABLED(cmd, *en);
++
++ return 0;
++}
++
++int dpdmai_reset(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_RESET,
++ cmd_flags,
++ token);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpdmai_get_irq(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ int *type,
++ struct dpdmai_irq_cfg *irq_cfg)
++{
++ struct mc_command cmd = { 0 };
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_GET_IRQ,
++ cmd_flags,
++ token);
++ DPDMAI_CMD_GET_IRQ(cmd, irq_index);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ DPDMAI_RSP_GET_IRQ(cmd, *type, irq_cfg);
++
++ return 0;
++}
++
++int dpdmai_set_irq(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ struct dpdmai_irq_cfg *irq_cfg)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_SET_IRQ,
++ cmd_flags,
++ token);
++ DPDMAI_CMD_SET_IRQ(cmd, irq_index, irq_cfg);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpdmai_get_irq_enable(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ uint8_t *en)
++{
++ struct mc_command cmd = { 0 };
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_GET_IRQ_ENABLE,
++ cmd_flags,
++ token);
++ DPDMAI_CMD_GET_IRQ_ENABLE(cmd, irq_index);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ DPDMAI_RSP_GET_IRQ_ENABLE(cmd, *en);
++
++ return 0;
++}
++
++int dpdmai_set_irq_enable(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ uint8_t en)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_SET_IRQ_ENABLE,
++ cmd_flags,
++ token);
++ DPDMAI_CMD_SET_IRQ_ENABLE(cmd, irq_index, en);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpdmai_get_irq_mask(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ uint32_t *mask)
++{
++ struct mc_command cmd = { 0 };
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_GET_IRQ_MASK,
++ cmd_flags,
++ token);
++ DPDMAI_CMD_GET_IRQ_MASK(cmd, irq_index);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ DPDMAI_RSP_GET_IRQ_MASK(cmd, *mask);
++
++ return 0;
++}
++
++int dpdmai_set_irq_mask(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ uint32_t mask)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_SET_IRQ_MASK,
++ cmd_flags,
++ token);
++ DPDMAI_CMD_SET_IRQ_MASK(cmd, irq_index, mask);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpdmai_get_irq_status(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ uint32_t *status)
++{
++ struct mc_command cmd = { 0 };
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_GET_IRQ_STATUS,
++ cmd_flags,
++ token);
++ DPDMAI_CMD_GET_IRQ_STATUS(cmd, irq_index, *status);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ DPDMAI_RSP_GET_IRQ_STATUS(cmd, *status);
++
++ return 0;
++}
++
++int dpdmai_clear_irq_status(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ uint32_t status)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_CLEAR_IRQ_STATUS,
++ cmd_flags,
++ token);
++ DPDMAI_CMD_CLEAR_IRQ_STATUS(cmd, irq_index, status);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpdmai_get_attributes(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ struct dpdmai_attr *attr)
++{
++ struct mc_command cmd = { 0 };
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_GET_ATTR,
++ cmd_flags,
++ token);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ DPDMAI_RSP_GET_ATTR(cmd, attr);
++
++ return 0;
++}
++
++int dpdmai_set_rx_queue(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t priority,
++ const struct dpdmai_rx_queue_cfg *cfg)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_SET_RX_QUEUE,
++ cmd_flags,
++ token);
++ DPDMAI_CMD_SET_RX_QUEUE(cmd, priority, cfg);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpdmai_get_rx_queue(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t priority, struct dpdmai_rx_queue_attr *attr)
++{
++ struct mc_command cmd = { 0 };
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_GET_RX_QUEUE,
++ cmd_flags,
++ token);
++ DPDMAI_CMD_GET_RX_QUEUE(cmd, priority);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ DPDMAI_RSP_GET_RX_QUEUE(cmd, attr);
++
++ return 0;
++}
++
++int dpdmai_get_tx_queue(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t priority,
++ struct dpdmai_tx_queue_attr *attr)
++{
++ struct mc_command cmd = { 0 };
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_GET_TX_QUEUE,
++ cmd_flags,
++ token);
++ DPDMAI_CMD_GET_TX_QUEUE(cmd, priority);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ DPDMAI_RSP_GET_TX_QUEUE(cmd, attr);
++
++ return 0;
++}
+diff --git a/drivers/net/dpaa2/mc/dpdmux.c b/drivers/net/dpaa2/mc/dpdmux.c
+new file mode 100644
+index 0000000..dc07608
+--- /dev/null
++++ b/drivers/net/dpaa2/mc/dpdmux.c
+@@ -0,0 +1,567 @@
++/* Copyright 2013-2015 Freescale Semiconductor Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of the above-listed copyright holders nor the
++ * names of any contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
++ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
++ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
++ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
++ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
++ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
++ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
++ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
++ * POSSIBILITY OF SUCH DAMAGE.
++ */
++#include <fsl_mc_sys.h>
++#include <fsl_mc_cmd.h>
++#include <fsl_dpdmux.h>
++#include <fsl_dpdmux_cmd.h>
++
++int dpdmux_open(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ int dpdmux_id,
++ uint16_t *token)
++{
++ struct mc_command cmd = { 0 };
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_OPEN,
++ cmd_flags,
++ 0);
++ DPDMUX_CMD_OPEN(cmd, dpdmux_id);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ *token = MC_CMD_HDR_READ_TOKEN(cmd.header);
++
++ return 0;
++}
++
++int dpdmux_close(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_CLOSE,
++ cmd_flags,
++ token);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpdmux_create(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ const struct dpdmux_cfg *cfg,
++ uint16_t *token)
++{
++ struct mc_command cmd = { 0 };
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_CREATE,
++ cmd_flags,
++ 0);
++ DPDMUX_CMD_CREATE(cmd, cfg);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ *token = MC_CMD_HDR_READ_TOKEN(cmd.header);
++
++ return 0;
++}
++
++int dpdmux_destroy(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_DESTROY,
++ cmd_flags,
++ token);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpdmux_enable(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_ENABLE,
++ cmd_flags,
++ token);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpdmux_disable(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_DISABLE,
++ cmd_flags,
++ token);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpdmux_is_enabled(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ int *en)
++{
++ struct mc_command cmd = { 0 };
++ int err;
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_IS_ENABLED,
++ cmd_flags,
++ token);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ DPDMUX_RSP_IS_ENABLED(cmd, *en);
++
++ return 0;
++}
++
++int dpdmux_reset(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_RESET,
++ cmd_flags,
++ token);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpdmux_set_irq(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ struct dpdmux_irq_cfg *irq_cfg)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_SET_IRQ,
++ cmd_flags,
++ token);
++ DPDMUX_CMD_SET_IRQ(cmd, irq_index, irq_cfg);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpdmux_get_irq(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ int *type,
++ struct dpdmux_irq_cfg *irq_cfg)
++{
++ struct mc_command cmd = { 0 };
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_GET_IRQ,
++ cmd_flags,
++ token);
++ DPDMUX_CMD_GET_IRQ(cmd, irq_index);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ DPDMUX_RSP_GET_IRQ(cmd, *type, irq_cfg);
++
++ return 0;
++}
++
++int dpdmux_set_irq_enable(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ uint8_t en)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_SET_IRQ_ENABLE,
++ cmd_flags,
++ token);
++ DPDMUX_CMD_SET_IRQ_ENABLE(cmd, irq_index, en);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpdmux_get_irq_enable(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ uint8_t *en)
++{
++ struct mc_command cmd = { 0 };
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_GET_IRQ_ENABLE,
++ cmd_flags,
++ token);
++ DPDMUX_CMD_GET_IRQ_ENABLE(cmd, irq_index);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ DPDMUX_RSP_GET_IRQ_ENABLE(cmd, *en);
++
++ return 0;
++}
++
++int dpdmux_set_irq_mask(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ uint32_t mask)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_SET_IRQ_MASK,
++ cmd_flags,
++ token);
++ DPDMUX_CMD_SET_IRQ_MASK(cmd, irq_index, mask);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpdmux_get_irq_mask(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ uint32_t *mask)
++{
++ struct mc_command cmd = { 0 };
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_GET_IRQ_MASK,
++ cmd_flags,
++ token);
++ DPDMUX_CMD_GET_IRQ_MASK(cmd, irq_index);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ DPDMUX_RSP_GET_IRQ_MASK(cmd, *mask);
++
++ return 0;
++}
++
++int dpdmux_get_irq_status(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ uint32_t *status)
++{
++ struct mc_command cmd = { 0 };
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_GET_IRQ_STATUS,
++ cmd_flags,
++ token);
++ DPDMUX_CMD_GET_IRQ_STATUS(cmd, irq_index, *status);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ DPDMUX_RSP_GET_IRQ_STATUS(cmd, *status);
++
++ return 0;
++}
++
++int dpdmux_clear_irq_status(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ uint32_t status)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_CLEAR_IRQ_STATUS,
++ cmd_flags,
++ token);
++ DPDMUX_CMD_CLEAR_IRQ_STATUS(cmd, irq_index, status);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpdmux_get_attributes(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ struct dpdmux_attr *attr)
++{
++ struct mc_command cmd = { 0 };
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_GET_ATTR,
++ cmd_flags,
++ token);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ DPDMUX_RSP_GET_ATTR(cmd, attr);
++
++ return 0;
++}
++
++int dpdmux_ul_set_max_frame_length(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint16_t max_frame_length)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_UL_SET_MAX_FRAME_LENGTH,
++ cmd_flags,
++ token);
++ DPDMUX_CMD_UL_SET_MAX_FRAME_LENGTH(cmd, max_frame_length);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpdmux_ul_reset_counters(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_UL_RESET_COUNTERS,
++ cmd_flags,
++ token);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpdmux_if_set_accepted_frames(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint16_t if_id,
++ const struct dpdmux_accepted_frames *cfg)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_IF_SET_ACCEPTED_FRAMES,
++ cmd_flags,
++ token);
++ DPDMUX_CMD_IF_SET_ACCEPTED_FRAMES(cmd, if_id, cfg);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpdmux_if_get_attributes(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint16_t if_id,
++ struct dpdmux_if_attr *attr)
++{
++ struct mc_command cmd = { 0 };
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_IF_GET_ATTR,
++ cmd_flags,
++ token);
++ DPDMUX_CMD_IF_GET_ATTR(cmd, if_id);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ DPDMUX_RSP_IF_GET_ATTR(cmd, attr);
++
++ return 0;
++}
++
++int dpdmux_if_remove_l2_rule(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint16_t if_id,
++ const struct dpdmux_l2_rule *rule)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_IF_REMOVE_L2_RULE,
++ cmd_flags,
++ token);
++ DPDMUX_CMD_IF_REMOVE_L2_RULE(cmd, if_id, rule);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpdmux_if_add_l2_rule(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint16_t if_id,
++ const struct dpdmux_l2_rule *rule)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_IF_ADD_L2_RULE,
++ cmd_flags,
++ token);
++ DPDMUX_CMD_IF_ADD_L2_RULE(cmd, if_id, rule);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpdmux_if_get_counter(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint16_t if_id,
++ enum dpdmux_counter_type counter_type,
++ uint64_t *counter)
++{
++ struct mc_command cmd = { 0 };
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_IF_GET_COUNTER,
++ cmd_flags,
++ token);
++ DPDMUX_CMD_IF_GET_COUNTER(cmd, if_id, counter_type);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ DPDMUX_RSP_IF_GET_COUNTER(cmd, *counter);
++
++ return 0;
++}
++
++int dpdmux_if_set_link_cfg(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint16_t if_id,
++ struct dpdmux_link_cfg *cfg)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_IF_SET_LINK_CFG,
++ cmd_flags,
++ token);
++ DPDMUX_CMD_IF_SET_LINK_CFG(cmd, if_id, cfg);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpdmux_if_get_link_state(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint16_t if_id,
++ struct dpdmux_link_state *state)
++{
++ struct mc_command cmd = { 0 };
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_IF_GET_LINK_STATE,
++ cmd_flags,
++ token);
++ DPDMUX_CMD_IF_GET_LINK_STATE(cmd, if_id);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ DPDMUX_RSP_IF_GET_LINK_STATE(cmd, state);
++
++ return 0;
++}
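The dpdmux calls in the hunk above all follow the same token/command/response pattern as the rest of the MC API. The following is a minimal caller-side sketch, not part of the patch: it assumes a DPDMUX token obtained from dpdmux_open() earlier in the same file, a cmd_flags value of 0 as a "no special flags" placeholder, a header named fsl_dpdmux.h (inferred from the naming of the other mc/ headers), and a hypothetical example function name.

/*
 * Illustrative sketch only (not part of the patch): sanity-check an
 * already-opened DPDMUX object, raise its uplink max frame length and
 * clear its counters.  "token" is assumed to come from dpdmux_open().
 */
#include <fsl_mc_sys.h>
#include <fsl_dpdmux.h>   /* header name assumed from the mc/ naming pattern */

static int dpdmux_example_setup(struct fsl_mc_io *mc_io, uint16_t token)
{
        struct dpdmux_attr attr;
        int err;

        /* read back the object attributes to confirm the token is live */
        err = dpdmux_get_attributes(mc_io, 0, token, &attr);
        if (err)
                return err;

        /* allow jumbo-sized frames on the uplink side */
        err = dpdmux_ul_set_max_frame_length(mc_io, 0, token, 9000);
        if (err)
                return err;

        /* start statistics from a clean baseline */
        return dpdmux_ul_reset_counters(mc_io, 0, token);
}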
+diff --git a/drivers/net/dpaa2/mc/dpio.c b/drivers/net/dpaa2/mc/dpio.c
+new file mode 100644
+index 0000000..f511e29
+--- /dev/null
++++ b/drivers/net/dpaa2/mc/dpio.c
+@@ -0,0 +1,468 @@
++/* Copyright 2013-2015 Freescale Semiconductor Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of the above-listed copyright holders nor the
++ * names of any contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
++ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
++ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
++ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
++ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
++ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
++ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
++ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
++ * POSSIBILITY OF SUCH DAMAGE.
++ */
++#include <fsl_mc_sys.h>
++#include <fsl_mc_cmd.h>
++#include <fsl_dpio.h>
++#include <fsl_dpio_cmd.h>
++
++int dpio_open(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ int dpio_id,
++ uint16_t *token)
++{
++ struct mc_command cmd = { 0 };
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPIO_CMDID_OPEN,
++ cmd_flags,
++ 0);
++ DPIO_CMD_OPEN(cmd, dpio_id);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ *token = MC_CMD_HDR_READ_TOKEN(cmd.header);
++
++ return 0;
++}
++
++int dpio_close(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPIO_CMDID_CLOSE,
++ cmd_flags,
++ token);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpio_create(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ const struct dpio_cfg *cfg,
++ uint16_t *token)
++{
++ struct mc_command cmd = { 0 };
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPIO_CMDID_CREATE,
++ cmd_flags,
++ 0);
++ DPIO_CMD_CREATE(cmd, cfg);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ *token = MC_CMD_HDR_READ_TOKEN(cmd.header);
++
++ return 0;
++}
++
++int dpio_destroy(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPIO_CMDID_DESTROY,
++ cmd_flags,
++ token);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpio_enable(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPIO_CMDID_ENABLE,
++ cmd_flags,
++ token);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpio_disable(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPIO_CMDID_DISABLE,
++ cmd_flags,
++ token);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpio_is_enabled(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ int *en)
++{
++ struct mc_command cmd = { 0 };
++ int err;
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPIO_CMDID_IS_ENABLED, cmd_flags,
++ token);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ DPIO_RSP_IS_ENABLED(cmd, *en);
++
++ return 0;
++}
++
++int dpio_reset(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPIO_CMDID_RESET,
++ cmd_flags,
++ token);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpio_set_irq(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ struct dpio_irq_cfg *irq_cfg)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPIO_CMDID_SET_IRQ,
++ cmd_flags,
++ token);
++ DPIO_CMD_SET_IRQ(cmd, irq_index, irq_cfg);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpio_get_irq(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ int *type,
++ struct dpio_irq_cfg *irq_cfg)
++{
++ struct mc_command cmd = { 0 };
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPIO_CMDID_GET_IRQ,
++ cmd_flags,
++ token);
++ DPIO_CMD_GET_IRQ(cmd, irq_index);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ DPIO_RSP_GET_IRQ(cmd, *type, irq_cfg);
++
++ return 0;
++}
++
++int dpio_set_irq_enable(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ uint8_t en)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPIO_CMDID_SET_IRQ_ENABLE,
++ cmd_flags,
++ token);
++ DPIO_CMD_SET_IRQ_ENABLE(cmd, irq_index, en);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpio_get_irq_enable(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ uint8_t *en)
++{
++ struct mc_command cmd = { 0 };
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPIO_CMDID_GET_IRQ_ENABLE,
++ cmd_flags,
++ token);
++ DPIO_CMD_GET_IRQ_ENABLE(cmd, irq_index);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ DPIO_RSP_GET_IRQ_ENABLE(cmd, *en);
++
++ return 0;
++}
++
++int dpio_set_irq_mask(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ uint32_t mask)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPIO_CMDID_SET_IRQ_MASK,
++ cmd_flags,
++ token);
++ DPIO_CMD_SET_IRQ_MASK(cmd, irq_index, mask);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpio_get_irq_mask(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ uint32_t *mask)
++{
++ struct mc_command cmd = { 0 };
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPIO_CMDID_GET_IRQ_MASK,
++ cmd_flags,
++ token);
++ DPIO_CMD_GET_IRQ_MASK(cmd, irq_index);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ DPIO_RSP_GET_IRQ_MASK(cmd, *mask);
++
++ return 0;
++}
++
++int dpio_get_irq_status(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ uint32_t *status)
++{
++ struct mc_command cmd = { 0 };
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPIO_CMDID_GET_IRQ_STATUS,
++ cmd_flags,
++ token);
++ DPIO_CMD_GET_IRQ_STATUS(cmd, irq_index, *status);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ DPIO_RSP_GET_IRQ_STATUS(cmd, *status);
++
++ return 0;
++}
++
++int dpio_clear_irq_status(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ uint32_t status)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPIO_CMDID_CLEAR_IRQ_STATUS,
++ cmd_flags,
++ token);
++ DPIO_CMD_CLEAR_IRQ_STATUS(cmd, irq_index, status);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpio_get_attributes(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ struct dpio_attr *attr)
++{
++ struct mc_command cmd = { 0 };
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPIO_CMDID_GET_ATTR,
++ cmd_flags,
++ token);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ DPIO_RSP_GET_ATTR(cmd, attr);
++
++ return 0;
++}
++
++int dpio_set_stashing_destination(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t sdest)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPIO_CMDID_SET_STASHING_DEST,
++ cmd_flags,
++ token);
++ DPIO_CMD_SET_STASHING_DEST(cmd, sdest);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpio_get_stashing_destination(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t *sdest)
++{
++ struct mc_command cmd = { 0 };
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPIO_CMDID_GET_STASHING_DEST,
++ cmd_flags,
++ token);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ DPIO_RSP_GET_STASHING_DEST(cmd, *sdest);
++
++ return 0;
++}
++
++int dpio_add_static_dequeue_channel(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ int dpcon_id,
++ uint8_t *channel_index)
++{
++ struct mc_command cmd = { 0 };
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPIO_CMDID_ADD_STATIC_DEQUEUE_CHANNEL,
++ cmd_flags,
++ token);
++ DPIO_CMD_ADD_STATIC_DEQUEUE_CHANNEL(cmd, dpcon_id);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ DPIO_RSP_ADD_STATIC_DEQUEUE_CHANNEL(cmd, *channel_index);
++
++ return 0;
++}
++
++int dpio_remove_static_dequeue_channel(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ int dpcon_id)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(
++ DPIO_CMDID_REMOVE_STATIC_DEQUEUE_CHANNEL,
++ cmd_flags,
++ token);
++ DPIO_CMD_REMOVE_STATIC_DEQUEUE_CHANNEL(cmd, dpcon_id);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
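dpio.c above exposes the full open/configure/enable/close life cycle of a DPIO (software portal) object. Below is a minimal sketch of how a caller might string those calls together, using only the signatures defined in this file; the initialized struct fsl_mc_io portal, the cmd_flags value of 0, and the example function name are assumptions rather than anything the patch provides.

/*
 * Illustrative sketch only (not part of the patch): bring up a DPIO,
 * point its stashing at a core, attach a DPCON channel for static
 * dequeue, and enable it.  On failure the object is closed again.
 */
#include <fsl_mc_sys.h>
#include <fsl_dpio.h>

static int dpio_example_bringup(struct fsl_mc_io *mc_io, int dpio_id,
                                int dpcon_id, uint8_t core_sdest,
                                uint16_t *token_out)
{
        uint16_t token;
        uint8_t channel_index;
        int err;

        err = dpio_open(mc_io, 0, dpio_id, &token);
        if (err)
                return err;

        /* stash dequeued data/annotations close to the chosen core */
        err = dpio_set_stashing_destination(mc_io, 0, token, core_sdest);
        if (err)
                goto err_close;

        /* bind a DPCON channel so frames can be dequeued statically */
        err = dpio_add_static_dequeue_channel(mc_io, 0, token, dpcon_id,
                                              &channel_index);
        if (err)
                goto err_close;

        err = dpio_enable(mc_io, 0, token);
        if (err)
                goto err_close;

        *token_out = token;
        return 0;

err_close:
        dpio_close(mc_io, 0, token);
        return err;
}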
+diff --git a/drivers/net/dpaa2/mc/dpmac.c b/drivers/net/dpaa2/mc/dpmac.c
+new file mode 100644
+index 0000000..f31d949
+--- /dev/null
++++ b/drivers/net/dpaa2/mc/dpmac.c
+@@ -0,0 +1,422 @@
++/* Copyright 2013-2015 Freescale Semiconductor Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of the above-listed copyright holders nor the
++ * names of any contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
++ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
++ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
++ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
++ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
++ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
++ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
++ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
++ * POSSIBILITY OF SUCH DAMAGE.
++ */
++#include <fsl_mc_sys.h>
++#include <fsl_mc_cmd.h>
++#include <fsl_dpmac.h>
++#include <fsl_dpmac_cmd.h>
++
++int dpmac_open(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ int dpmac_id,
++ uint16_t *token)
++{
++ struct mc_command cmd = { 0 };
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPMAC_CMDID_OPEN,
++ cmd_flags,
++ 0);
++ DPMAC_CMD_OPEN(cmd, dpmac_id);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ *token = MC_CMD_HDR_READ_TOKEN(cmd.header);
++
++ return err;
++}
++
++int dpmac_close(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPMAC_CMDID_CLOSE, cmd_flags,
++ token);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpmac_create(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ const struct dpmac_cfg *cfg,
++ uint16_t *token)
++{
++ struct mc_command cmd = { 0 };
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPMAC_CMDID_CREATE,
++ cmd_flags,
++ 0);
++ DPMAC_CMD_CREATE(cmd, cfg);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ *token = MC_CMD_HDR_READ_TOKEN(cmd.header);
++
++ return 0;
++}
++
++int dpmac_destroy(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPMAC_CMDID_DESTROY,
++ cmd_flags,
++ token);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpmac_set_irq(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ struct dpmac_irq_cfg *irq_cfg)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPMAC_CMDID_SET_IRQ,
++ cmd_flags,
++ token);
++ DPMAC_CMD_SET_IRQ(cmd, irq_index, irq_cfg);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpmac_get_irq(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ int *type,
++ struct dpmac_irq_cfg *irq_cfg)
++{
++ struct mc_command cmd = { 0 };
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPMAC_CMDID_GET_IRQ,
++ cmd_flags,
++ token);
++ DPMAC_CMD_GET_IRQ(cmd, irq_index);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ DPMAC_RSP_GET_IRQ(cmd, *type, irq_cfg);
++
++ return 0;
++}
++
++int dpmac_set_irq_enable(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ uint8_t en)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPMAC_CMDID_SET_IRQ_ENABLE,
++ cmd_flags,
++ token);
++ DPMAC_CMD_SET_IRQ_ENABLE(cmd, irq_index, en);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpmac_get_irq_enable(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ uint8_t *en)
++{
++ struct mc_command cmd = { 0 };
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPMAC_CMDID_GET_IRQ_ENABLE,
++ cmd_flags,
++ token);
++ DPMAC_CMD_GET_IRQ_ENABLE(cmd, irq_index);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ DPMAC_RSP_GET_IRQ_ENABLE(cmd, *en);
++
++ return 0;
++}
++
++int dpmac_set_irq_mask(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ uint32_t mask)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPMAC_CMDID_SET_IRQ_MASK,
++ cmd_flags,
++ token);
++ DPMAC_CMD_SET_IRQ_MASK(cmd, irq_index, mask);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpmac_get_irq_mask(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ uint32_t *mask)
++{
++ struct mc_command cmd = { 0 };
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPMAC_CMDID_GET_IRQ_MASK,
++ cmd_flags,
++ token);
++ DPMAC_CMD_GET_IRQ_MASK(cmd, irq_index);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ DPMAC_RSP_GET_IRQ_MASK(cmd, *mask);
++
++ return 0;
++}
++
++int dpmac_get_irq_status(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ uint32_t *status)
++{
++ struct mc_command cmd = { 0 };
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPMAC_CMDID_GET_IRQ_STATUS,
++ cmd_flags,
++ token);
++ DPMAC_CMD_GET_IRQ_STATUS(cmd, irq_index, *status);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ DPMAC_RSP_GET_IRQ_STATUS(cmd, *status);
++
++ return 0;
++}
++
++int dpmac_clear_irq_status(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ uint32_t status)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPMAC_CMDID_CLEAR_IRQ_STATUS,
++ cmd_flags,
++ token);
++ DPMAC_CMD_CLEAR_IRQ_STATUS(cmd, irq_index, status);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpmac_get_attributes(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ struct dpmac_attr *attr)
++{
++ struct mc_command cmd = { 0 };
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPMAC_CMDID_GET_ATTR,
++ cmd_flags,
++ token);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ DPMAC_RSP_GET_ATTRIBUTES(cmd, attr);
++
++ return 0;
++}
++
++int dpmac_mdio_read(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ struct dpmac_mdio_cfg *cfg)
++{
++ struct mc_command cmd = { 0 };
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPMAC_CMDID_MDIO_READ,
++ cmd_flags,
++ token);
++ DPMAC_CMD_MDIO_READ(cmd, cfg);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ DPMAC_RSP_MDIO_READ(cmd, cfg->data);
++
++ return 0;
++}
++
++int dpmac_mdio_write(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ struct dpmac_mdio_cfg *cfg)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPMAC_CMDID_MDIO_WRITE,
++ cmd_flags,
++ token);
++ DPMAC_CMD_MDIO_WRITE(cmd, cfg);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpmac_get_link_cfg(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ struct dpmac_link_cfg *cfg)
++{
++ struct mc_command cmd = { 0 };
++ int err = 0;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPMAC_CMDID_GET_LINK_CFG,
++ cmd_flags,
++ token);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ DPMAC_RSP_GET_LINK_CFG(cmd, cfg);
++
++ return 0;
++}
++
++int dpmac_set_link_state(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ struct dpmac_link_state *link_state)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPMAC_CMDID_SET_LINK_STATE,
++ cmd_flags,
++ token);
++ DPMAC_CMD_SET_LINK_STATE(cmd, link_state);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpmac_get_counter(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ enum dpmac_counter type,
++ uint64_t *counter)
++{
++ struct mc_command cmd = { 0 };
++ int err = 0;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPMAC_CMDID_GET_COUNTER,
++ cmd_flags,
++ token);
++ DPMAC_CMD_GET_COUNTER(cmd, type);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ DPMAC_RSP_GET_COUNTER(cmd, *counter);
++
++ return 0;
++}
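dpmac.c applies the same pattern to the MAC-facing object. A short sketch of querying the link configuration follows; the struct dpmac_link_cfg contents are left to the caller because its fields live in fsl_dpmac.h and are not shown in this hunk, and the cmd_flags of 0 plus the example function name are assumptions.

/*
 * Illustrative sketch only (not part of the patch): open a DPMAC,
 * fetch the link configuration requested for it, then close it.
 */
#include <fsl_mc_sys.h>
#include <fsl_dpmac.h>

static int dpmac_example_query_link(struct fsl_mc_io *mc_io, int dpmac_id,
                                    struct dpmac_link_cfg *cfg)
{
        uint16_t token;
        int err;

        err = dpmac_open(mc_io, 0, dpmac_id, &token);
        if (err)
                return err;

        /* caller inspects the cfg fields declared in fsl_dpmac.h */
        err = dpmac_get_link_cfg(mc_io, 0, token, cfg);

        dpmac_close(mc_io, 0, token);
        return err;
}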
+diff --git a/drivers/net/dpaa2/mc/dpmcp.c b/drivers/net/dpaa2/mc/dpmcp.c
+new file mode 100644
+index 0000000..dfd84b8
+--- /dev/null
++++ b/drivers/net/dpaa2/mc/dpmcp.c
+@@ -0,0 +1,312 @@
++/* Copyright 2013-2015 Freescale Semiconductor Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of the above-listed copyright holders nor the
++ * names of any contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
++ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
++ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
++ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
++ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
++ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
++ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
++ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
++ * POSSIBILITY OF SUCH DAMAGE.
++ */
++#include <fsl_mc_sys.h>
++#include <fsl_mc_cmd.h>
++#include <fsl_dpmcp.h>
++#include <fsl_dpmcp_cmd.h>
++
++int dpmcp_open(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ int dpmcp_id,
++ uint16_t *token)
++{
++ struct mc_command cmd = { 0 };
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPMCP_CMDID_OPEN,
++ cmd_flags,
++ 0);
++ DPMCP_CMD_OPEN(cmd, dpmcp_id);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ *token = MC_CMD_HDR_READ_TOKEN(cmd.header);
++
++ return err;
++}
++
++int dpmcp_close(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPMCP_CMDID_CLOSE, cmd_flags,
++ token);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpmcp_create(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ const struct dpmcp_cfg *cfg,
++ uint16_t *token)
++{
++ struct mc_command cmd = { 0 };
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPMCP_CMDID_CREATE,
++ cmd_flags,
++ 0);
++ DPMCP_CMD_CREATE(cmd, cfg);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ *token = MC_CMD_HDR_READ_TOKEN(cmd.header);
++
++ return 0;
++}
++
++int dpmcp_destroy(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPMCP_CMDID_DESTROY,
++ cmd_flags,
++ token);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpmcp_reset(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPMCP_CMDID_RESET,
++ cmd_flags,
++ token);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpmcp_set_irq(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ struct dpmcp_irq_cfg *irq_cfg)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPMCP_CMDID_SET_IRQ,
++ cmd_flags,
++ token);
++ DPMCP_CMD_SET_IRQ(cmd, irq_index, irq_cfg);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpmcp_get_irq(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ int *type,
++ struct dpmcp_irq_cfg *irq_cfg)
++{
++ struct mc_command cmd = { 0 };
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPMCP_CMDID_GET_IRQ,
++ cmd_flags,
++ token);
++ DPMCP_CMD_GET_IRQ(cmd, irq_index);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ DPMCP_RSP_GET_IRQ(cmd, *type, irq_cfg);
++
++ return 0;
++}
++
++int dpmcp_set_irq_enable(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ uint8_t en)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPMCP_CMDID_SET_IRQ_ENABLE,
++ cmd_flags,
++ token);
++ DPMCP_CMD_SET_IRQ_ENABLE(cmd, irq_index, en);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpmcp_get_irq_enable(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ uint8_t *en)
++{
++ struct mc_command cmd = { 0 };
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPMCP_CMDID_GET_IRQ_ENABLE,
++ cmd_flags,
++ token);
++ DPMCP_CMD_GET_IRQ_ENABLE(cmd, irq_index);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ DPMCP_RSP_GET_IRQ_ENABLE(cmd, *en);
++
++ return 0;
++}
++
++int dpmcp_set_irq_mask(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ uint32_t mask)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPMCP_CMDID_SET_IRQ_MASK,
++ cmd_flags,
++ token);
++ DPMCP_CMD_SET_IRQ_MASK(cmd, irq_index, mask);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpmcp_get_irq_mask(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ uint32_t *mask)
++{
++ struct mc_command cmd = { 0 };
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPMCP_CMDID_GET_IRQ_MASK,
++ cmd_flags,
++ token);
++ DPMCP_CMD_GET_IRQ_MASK(cmd, irq_index);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ DPMCP_RSP_GET_IRQ_MASK(cmd, *mask);
++
++ return 0;
++}
++
++int dpmcp_get_irq_status(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ uint32_t *status)
++{
++ struct mc_command cmd = { 0 };
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPMCP_CMDID_GET_IRQ_STATUS,
++ cmd_flags,
++ token);
++ DPMCP_CMD_GET_IRQ_STATUS(cmd, irq_index, *status);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ DPMCP_RSP_GET_IRQ_STATUS(cmd, *status);
++
++ return 0;
++}
++
++int dpmcp_get_attributes(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ struct dpmcp_attr *attr)
++{
++ struct mc_command cmd = { 0 };
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPMCP_CMDID_GET_ATTR,
++ cmd_flags,
++ token);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ DPMCP_RSP_GET_ATTRIBUTES(cmd, attr);
++
++ return 0;
++}
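dpmcp.c wraps the MC command-portal object itself. A minimal usage sketch, again assuming an initialized portal, cmd_flags of 0 and a hypothetical function name:

/*
 * Illustrative sketch only (not part of the patch): open a DPMCP,
 * reset it to a known state, read back its attributes and close it.
 */
#include <fsl_mc_sys.h>
#include <fsl_dpmcp.h>

static int dpmcp_example_reset(struct fsl_mc_io *mc_io, int dpmcp_id,
                               struct dpmcp_attr *attr)
{
        uint16_t token;
        int err;

        err = dpmcp_open(mc_io, 0, dpmcp_id, &token);
        if (err)
                return err;

        /* put the command portal back into a known state */
        err = dpmcp_reset(mc_io, 0, token);
        if (!err)
                err = dpmcp_get_attributes(mc_io, 0, token, attr);

        dpmcp_close(mc_io, 0, token);
        return err;
}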
+diff --git a/drivers/net/dpaa2/mc/dpmng.c b/drivers/net/dpaa2/mc/dpmng.c
+new file mode 100644
+index 0000000..cac5ba5
+--- /dev/null
++++ b/drivers/net/dpaa2/mc/dpmng.c
+@@ -0,0 +1,58 @@
++/* Copyright 2013-2015 Freescale Semiconductor Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of the above-listed copyright holders nor the
++ * names of any contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
++ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
++ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
++ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
++ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
++ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
++ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
++ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
++ * POSSIBILITY OF SUCH DAMAGE.
++ */
++#include <fsl_mc_sys.h>
++#include <fsl_mc_cmd.h>
++#include <fsl_dpmng.h>
++#include <fsl_dpmng_cmd.h>
++
++int mc_get_version(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ struct mc_version *mc_ver_info)
++{
++ struct mc_command cmd = { 0 };
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPMNG_CMDID_GET_VERSION,
++ cmd_flags,
++ 0);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ DPMNG_RSP_GET_VERSION(cmd, mc_ver_info);
++
++ return 0;
++}
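dpmng.c contains a single call, mc_get_version(), which needs no object token, so probing the MC firmware reduces to one command on an initialized portal. A one-line sketch, with the cmd_flags of 0 and the wrapper name being assumptions; the mc_version fields are defined in fsl_dpmng.h and are not touched here.

/* Illustrative sketch only (not part of the patch). */
#include <fsl_mc_sys.h>
#include <fsl_dpmng.h>

static int mc_example_probe_firmware(struct fsl_mc_io *mc_io,
                                     struct mc_version *ver)
{
        /* cmd_flags 0: no priority or interrupt flags requested */
        return mc_get_version(mc_io, 0, ver);
}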
+diff --git a/drivers/net/dpaa2/mc/dpni.c b/drivers/net/dpaa2/mc/dpni.c
+new file mode 100644
+index 0000000..cdd2f37
+--- /dev/null
++++ b/drivers/net/dpaa2/mc/dpni.c
+@@ -0,0 +1,1907 @@
++/* Copyright 2013-2015 Freescale Semiconductor Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of the above-listed copyright holders nor the
++ * names of any contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
++ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
++ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
++ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
++ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
++ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
++ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
++ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
++ * POSSIBILITY OF SUCH DAMAGE.
++ */
++#include <fsl_mc_sys.h>
++#include <fsl_mc_cmd.h>
++#include <fsl_dpni.h>
++#include <fsl_dpni_cmd.h>
++
++int dpni_prepare_key_cfg(const struct dpkg_profile_cfg *cfg,
++ uint8_t *key_cfg_buf)
++{
++ int i, j;
++ int offset = 0;
++ int param = 1;
++ uint64_t *params = (uint64_t *)key_cfg_buf;
++
++ if (!key_cfg_buf || !cfg)
++ return -EINVAL;
++
++ params[0] |= mc_enc(0, 8, cfg->num_extracts);
++ params[0] = cpu_to_le64(params[0]);
++
++ if (cfg->num_extracts >= DPKG_MAX_NUM_OF_EXTRACTS)
++ return -EINVAL;
++
++ for (i = 0; i < cfg->num_extracts; i++) {
++ switch (cfg->extracts[i].type) {
++ case DPKG_EXTRACT_FROM_HDR:
++ params[param] |= mc_enc(0, 8,
++ cfg->extracts[i].extract.from_hdr.prot);
++ params[param] |= mc_enc(8, 4,
++ cfg->extracts[i].extract.from_hdr.type);
++ params[param] |= mc_enc(16, 8,
++ cfg->extracts[i].extract.from_hdr.size);
++ params[param] |= mc_enc(24, 8,
++ cfg->extracts[i].extract.
++ from_hdr.offset);
++ params[param] |= mc_enc(32, 32,
++ cfg->extracts[i].extract.
++ from_hdr.field);
++ params[param] = cpu_to_le64(params[param]);
++ param++;
++ params[param] |= mc_enc(0, 8,
++ cfg->extracts[i].extract.
++ from_hdr.hdr_index);
++ break;
++ case DPKG_EXTRACT_FROM_DATA:
++ params[param] |= mc_enc(16, 8,
++ cfg->extracts[i].extract.
++ from_data.size);
++ params[param] |= mc_enc(24, 8,
++ cfg->extracts[i].extract.
++ from_data.offset);
++ params[param] = cpu_to_le64(params[param]);
++ param++;
++ break;
++ case DPKG_EXTRACT_FROM_PARSE:
++ params[param] |= mc_enc(16, 8,
++ cfg->extracts[i].extract.
++ from_parse.size);
++ params[param] |= mc_enc(24, 8,
++ cfg->extracts[i].extract.
++ from_parse.offset);
++ params[param] = cpu_to_le64(params[param]);
++ param++;
++ break;
++ default:
++ return -EINVAL;
++ }
++ params[param] |= mc_enc(
++ 24, 8, cfg->extracts[i].num_of_byte_masks);
++ params[param] |= mc_enc(32, 4, cfg->extracts[i].type);
++ params[param] = cpu_to_le64(params[param]);
++ param++;
++ for (offset = 0, j = 0;
++ j < DPKG_NUM_OF_MASKS;
++ offset += 16, j++) {
++ params[param] |= mc_enc(
++ (offset), 8, cfg->extracts[i].masks[j].mask);
++ params[param] |= mc_enc(
++ (offset + 8), 8,
++ cfg->extracts[i].masks[j].offset);
++ }
++ params[param] = cpu_to_le64(params[param]);
++ param++;
++ }
++ return 0;
++}
++
++int dpni_prepare_extended_cfg(const struct dpni_extended_cfg *cfg,
++ uint8_t *ext_cfg_buf)
++{
++ uint64_t *ext_params = (uint64_t *)ext_cfg_buf;
++
++ DPNI_PREP_EXTENDED_CFG(ext_params, cfg);
++
++ return 0;
++}
++
++int dpni_extract_extended_cfg(struct dpni_extended_cfg *cfg,
++ const uint8_t *ext_cfg_buf)
++{
++ const uint64_t *ext_params = (const uint64_t *)ext_cfg_buf;
++
++ DPNI_EXT_EXTENDED_CFG(ext_params, cfg);
++
++ return 0;
++}
++
++int dpni_open(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ int dpni_id,
++ uint16_t *token)
++{
++ struct mc_command cmd = { 0 };
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_OPEN,
++ cmd_flags,
++ 0);
++ DPNI_CMD_OPEN(cmd, dpni_id);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ *token = MC_CMD_HDR_READ_TOKEN(cmd.header);
++
++ return 0;
++}
++
++int dpni_close(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_CLOSE,
++ cmd_flags,
++ token);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpni_create(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ const struct dpni_cfg *cfg,
++ uint16_t *token)
++{
++ struct mc_command cmd = { 0 };
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_CREATE,
++ cmd_flags,
++ 0);
++ DPNI_CMD_CREATE(cmd, cfg);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ *token = MC_CMD_HDR_READ_TOKEN(cmd.header);
++
++ return 0;
++}
++
++int dpni_destroy(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_DESTROY,
++ cmd_flags,
++ token);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpni_set_pools(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ const struct dpni_pools_cfg *cfg)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_POOLS,
++ cmd_flags,
++ token);
++ DPNI_CMD_SET_POOLS(cmd, cfg);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpni_enable(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_ENABLE,
++ cmd_flags,
++ token);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpni_disable(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_DISABLE,
++ cmd_flags,
++ token);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpni_is_enabled(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ int *en)
++{
++ struct mc_command cmd = { 0 };
++ int err;
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_IS_ENABLED, cmd_flags,
++ token);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ DPNI_RSP_IS_ENABLED(cmd, *en);
++
++ return 0;
++}
++
++int dpni_reset(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_RESET,
++ cmd_flags,
++ token);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpni_set_irq(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ struct dpni_irq_cfg *irq_cfg)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_IRQ,
++ cmd_flags,
++ token);
++ DPNI_CMD_SET_IRQ(cmd, irq_index, irq_cfg);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpni_get_irq(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ int *type,
++ struct dpni_irq_cfg *irq_cfg)
++{
++ struct mc_command cmd = { 0 };
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_IRQ,
++ cmd_flags,
++ token);
++ DPNI_CMD_GET_IRQ(cmd, irq_index);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ DPNI_RSP_GET_IRQ(cmd, *type, irq_cfg);
++
++ return 0;
++}
++
++int dpni_set_irq_enable(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ uint8_t en)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_IRQ_ENABLE,
++ cmd_flags,
++ token);
++ DPNI_CMD_SET_IRQ_ENABLE(cmd, irq_index, en);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpni_get_irq_enable(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ uint8_t *en)
++{
++ struct mc_command cmd = { 0 };
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_IRQ_ENABLE,
++ cmd_flags,
++ token);
++ DPNI_CMD_GET_IRQ_ENABLE(cmd, irq_index);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ DPNI_RSP_GET_IRQ_ENABLE(cmd, *en);
++
++ return 0;
++}
++
++int dpni_set_irq_mask(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ uint32_t mask)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_IRQ_MASK,
++ cmd_flags,
++ token);
++ DPNI_CMD_SET_IRQ_MASK(cmd, irq_index, mask);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpni_get_irq_mask(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ uint32_t *mask)
++{
++ struct mc_command cmd = { 0 };
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_IRQ_MASK,
++ cmd_flags,
++ token);
++ DPNI_CMD_GET_IRQ_MASK(cmd, irq_index);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ DPNI_RSP_GET_IRQ_MASK(cmd, *mask);
++
++ return 0;
++}
++
++int dpni_get_irq_status(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ uint32_t *status)
++{
++ struct mc_command cmd = { 0 };
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_IRQ_STATUS,
++ cmd_flags,
++ token);
++ DPNI_CMD_GET_IRQ_STATUS(cmd, irq_index, *status);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ DPNI_RSP_GET_IRQ_STATUS(cmd, *status);
++
++ return 0;
++}
++
++int dpni_clear_irq_status(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ uint32_t status)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_CLEAR_IRQ_STATUS,
++ cmd_flags,
++ token);
++ DPNI_CMD_CLEAR_IRQ_STATUS(cmd, irq_index, status);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpni_get_attributes(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ struct dpni_attr *attr)
++{
++ struct mc_command cmd = { 0 };
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_ATTR,
++ cmd_flags,
++ token);
++ DPNI_CMD_GET_ATTR(cmd, attr);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ DPNI_RSP_GET_ATTR(cmd, attr);
++
++ return 0;
++}
++
++int dpni_set_errors_behavior(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ struct dpni_error_cfg *cfg)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_ERRORS_BEHAVIOR,
++ cmd_flags,
++ token);
++ DPNI_CMD_SET_ERRORS_BEHAVIOR(cmd, cfg);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpni_get_rx_buffer_layout(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ struct dpni_buffer_layout *layout)
++{
++ struct mc_command cmd = { 0 };
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_RX_BUFFER_LAYOUT,
++ cmd_flags,
++ token);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ DPNI_RSP_GET_RX_BUFFER_LAYOUT(cmd, layout);
++
++ return 0;
++}
++
++int dpni_set_rx_buffer_layout(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ const struct dpni_buffer_layout *layout)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_RX_BUFFER_LAYOUT,
++ cmd_flags,
++ token);
++ DPNI_CMD_SET_RX_BUFFER_LAYOUT(cmd, layout);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpni_get_tx_buffer_layout(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ struct dpni_buffer_layout *layout)
++{
++ struct mc_command cmd = { 0 };
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_TX_BUFFER_LAYOUT,
++ cmd_flags,
++ token);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ DPNI_RSP_GET_TX_BUFFER_LAYOUT(cmd, layout);
++
++ return 0;
++}
++
++int dpni_set_tx_buffer_layout(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ const struct dpni_buffer_layout *layout)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_TX_BUFFER_LAYOUT,
++ cmd_flags,
++ token);
++ DPNI_CMD_SET_TX_BUFFER_LAYOUT(cmd, layout);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpni_get_tx_conf_buffer_layout(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ struct dpni_buffer_layout *layout)
++{
++ struct mc_command cmd = { 0 };
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_TX_CONF_BUFFER_LAYOUT,
++ cmd_flags,
++ token);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ DPNI_RSP_GET_TX_CONF_BUFFER_LAYOUT(cmd, layout);
++
++ return 0;
++}
++
++int dpni_set_tx_conf_buffer_layout(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ const struct dpni_buffer_layout *layout)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_TX_CONF_BUFFER_LAYOUT,
++ cmd_flags,
++ token);
++ DPNI_CMD_SET_TX_CONF_BUFFER_LAYOUT(cmd, layout);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpni_get_l3_chksum_validation(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ int *en)
++{
++ struct mc_command cmd = { 0 };
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_L3_CHKSUM_VALIDATION,
++ cmd_flags,
++ token);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ DPNI_RSP_GET_L3_CHKSUM_VALIDATION(cmd, *en);
++
++ return 0;
++}
++
++int dpni_set_l3_chksum_validation(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ int en)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_L3_CHKSUM_VALIDATION,
++ cmd_flags,
++ token);
++ DPNI_CMD_SET_L3_CHKSUM_VALIDATION(cmd, en);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpni_get_l4_chksum_validation(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ int *en)
++{
++ struct mc_command cmd = { 0 };
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_L4_CHKSUM_VALIDATION,
++ cmd_flags,
++ token);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ DPNI_RSP_GET_L4_CHKSUM_VALIDATION(cmd, *en);
++
++ return 0;
++}
++
++int dpni_set_l4_chksum_validation(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ int en)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_L4_CHKSUM_VALIDATION,
++ cmd_flags,
++ token);
++ DPNI_CMD_SET_L4_CHKSUM_VALIDATION(cmd, en);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpni_get_qdid(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint16_t *qdid)
++{
++ struct mc_command cmd = { 0 };
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_QDID,
++ cmd_flags,
++ token);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ DPNI_RSP_GET_QDID(cmd, *qdid);
++
++ return 0;
++}
++
++int dpni_get_sp_info(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ struct dpni_sp_info *sp_info)
++{
++ struct mc_command cmd = { 0 };
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_SP_INFO,
++ cmd_flags,
++ token);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ DPNI_RSP_GET_SP_INFO(cmd, sp_info);
++
++ return 0;
++}
++
++int dpni_get_tx_data_offset(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint16_t *data_offset)
++{
++ struct mc_command cmd = { 0 };
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_TX_DATA_OFFSET,
++ cmd_flags,
++ token);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ DPNI_RSP_GET_TX_DATA_OFFSET(cmd, *data_offset);
++
++ return 0;
++}
++
++int dpni_get_counter(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ enum dpni_counter counter,
++ uint64_t *value)
++{
++ struct mc_command cmd = { 0 };
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_COUNTER,
++ cmd_flags,
++ token);
++ DPNI_CMD_GET_COUNTER(cmd, counter);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ DPNI_RSP_GET_COUNTER(cmd, *value);
++
++ return 0;
++}
++
++int dpni_set_counter(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ enum dpni_counter counter,
++ uint64_t value)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_COUNTER,
++ cmd_flags,
++ token);
++ DPNI_CMD_SET_COUNTER(cmd, counter, value);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpni_set_link_cfg(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ const struct dpni_link_cfg *cfg)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_LINK_CFG,
++ cmd_flags,
++ token);
++ DPNI_CMD_SET_LINK_CFG(cmd, cfg);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpni_get_link_state(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ struct dpni_link_state *state)
++{
++ struct mc_command cmd = { 0 };
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_LINK_STATE,
++ cmd_flags,
++ token);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ DPNI_RSP_GET_LINK_STATE(cmd, state);
++
++ return 0;
++}
++
++int dpni_set_tx_shaping(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ const struct dpni_tx_shaping_cfg *tx_shaper)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_TX_SHAPING,
++ cmd_flags,
++ token);
++ DPNI_CMD_SET_TX_SHAPING(cmd, tx_shaper);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpni_set_max_frame_length(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint16_t max_frame_length)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_MAX_FRAME_LENGTH,
++ cmd_flags,
++ token);
++ DPNI_CMD_SET_MAX_FRAME_LENGTH(cmd, max_frame_length);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpni_get_max_frame_length(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint16_t *max_frame_length)
++{
++ struct mc_command cmd = { 0 };
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_MAX_FRAME_LENGTH,
++ cmd_flags,
++ token);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ DPNI_RSP_GET_MAX_FRAME_LENGTH(cmd, *max_frame_length);
++
++ return 0;
++}
++
++int dpni_set_mtu(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint16_t mtu)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_MTU,
++ cmd_flags,
++ token);
++ DPNI_CMD_SET_MTU(cmd, mtu);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpni_get_mtu(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint16_t *mtu)
++{
++ struct mc_command cmd = { 0 };
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_MTU,
++ cmd_flags,
++ token);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ DPNI_RSP_GET_MTU(cmd, *mtu);
++
++ return 0;
++}
++
++int dpni_set_multicast_promisc(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ int en)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_MCAST_PROMISC,
++ cmd_flags,
++ token);
++ DPNI_CMD_SET_MULTICAST_PROMISC(cmd, en);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpni_get_multicast_promisc(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ int *en)
++{
++ struct mc_command cmd = { 0 };
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_MCAST_PROMISC,
++ cmd_flags,
++ token);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ DPNI_RSP_GET_MULTICAST_PROMISC(cmd, *en);
++
++ return 0;
++}
++
++int dpni_set_unicast_promisc(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ int en)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_UNICAST_PROMISC,
++ cmd_flags,
++ token);
++ DPNI_CMD_SET_UNICAST_PROMISC(cmd, en);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpni_get_unicast_promisc(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ int *en)
++{
++ struct mc_command cmd = { 0 };
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_UNICAST_PROMISC,
++ cmd_flags,
++ token);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ DPNI_RSP_GET_UNICAST_PROMISC(cmd, *en);
++
++ return 0;
++}
++
++int dpni_set_primary_mac_addr(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ const uint8_t mac_addr[6])
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_PRIM_MAC,
++ cmd_flags,
++ token);
++ DPNI_CMD_SET_PRIMARY_MAC_ADDR(cmd, mac_addr);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpni_get_primary_mac_addr(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t mac_addr[6])
++{
++ struct mc_command cmd = { 0 };
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_PRIM_MAC,
++ cmd_flags,
++ token);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ DPNI_RSP_GET_PRIMARY_MAC_ADDR(cmd, mac_addr);
++
++ return 0;
++}
++
++int dpni_add_mac_addr(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ const uint8_t mac_addr[6])
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_ADD_MAC_ADDR,
++ cmd_flags,
++ token);
++ DPNI_CMD_ADD_MAC_ADDR(cmd, mac_addr);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpni_remove_mac_addr(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ const uint8_t mac_addr[6])
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_REMOVE_MAC_ADDR,
++ cmd_flags,
++ token);
++ DPNI_CMD_REMOVE_MAC_ADDR(cmd, mac_addr);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpni_clear_mac_filters(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ int unicast,
++ int multicast)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_CLR_MAC_FILTERS,
++ cmd_flags,
++ token);
++ DPNI_CMD_CLEAR_MAC_FILTERS(cmd, unicast, multicast);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpni_set_vlan_filters(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ int en)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_VLAN_FILTERS,
++ cmd_flags,
++ token);
++ DPNI_CMD_SET_VLAN_FILTERS(cmd, en);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpni_add_vlan_id(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint16_t vlan_id)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_ADD_VLAN_ID,
++ cmd_flags,
++ token);
++ DPNI_CMD_ADD_VLAN_ID(cmd, vlan_id);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpni_remove_vlan_id(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint16_t vlan_id)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_REMOVE_VLAN_ID,
++ cmd_flags,
++ token);
++ DPNI_CMD_REMOVE_VLAN_ID(cmd, vlan_id);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpni_clear_vlan_filters(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_CLR_VLAN_FILTERS,
++ cmd_flags,
++ token);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpni_set_tx_selection(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ const struct dpni_tx_selection_cfg *cfg)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_TX_SELECTION,
++ cmd_flags,
++ token);
++ DPNI_CMD_SET_TX_SELECTION(cmd, cfg);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpni_set_rx_tc_dist(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t tc_id,
++ const struct dpni_rx_tc_dist_cfg *cfg)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_RX_TC_DIST,
++ cmd_flags,
++ token);
++ DPNI_CMD_SET_RX_TC_DIST(cmd, tc_id, cfg);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpni_set_tx_flow(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint16_t *flow_id,
++ const struct dpni_tx_flow_cfg *cfg)
++{
++ struct mc_command cmd = { 0 };
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_TX_FLOW,
++ cmd_flags,
++ token);
++ DPNI_CMD_SET_TX_FLOW(cmd, *flow_id, cfg);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ DPNI_RSP_SET_TX_FLOW(cmd, *flow_id);
++
++ return 0;
++}
++
++int dpni_get_tx_flow(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint16_t flow_id,
++ struct dpni_tx_flow_attr *attr)
++{
++ struct mc_command cmd = { 0 };
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_TX_FLOW,
++ cmd_flags,
++ token);
++ DPNI_CMD_GET_TX_FLOW(cmd, flow_id);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ DPNI_RSP_GET_TX_FLOW(cmd, attr);
++
++ return 0;
++}
++
++int dpni_set_rx_flow(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t tc_id,
++ uint16_t flow_id,
++ const struct dpni_queue_cfg *cfg)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_RX_FLOW,
++ cmd_flags,
++ token);
++ DPNI_CMD_SET_RX_FLOW(cmd, tc_id, flow_id, cfg);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpni_get_rx_flow(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t tc_id,
++ uint16_t flow_id,
++ struct dpni_queue_attr *attr)
++{
++ struct mc_command cmd = { 0 };
++ int err;
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_RX_FLOW,
++ cmd_flags,
++ token);
++ DPNI_CMD_GET_RX_FLOW(cmd, tc_id, flow_id);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ DPNI_RSP_GET_RX_FLOW(cmd, attr);
++
++ return 0;
++}
++
++int dpni_set_rx_err_queue(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ const struct dpni_queue_cfg *cfg)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_RX_ERR_QUEUE,
++ cmd_flags,
++ token);
++ DPNI_CMD_SET_RX_ERR_QUEUE(cmd, cfg);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpni_get_rx_err_queue(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ struct dpni_queue_attr *attr)
++{
++ struct mc_command cmd = { 0 };
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_RX_ERR_QUEUE,
++ cmd_flags,
++ token);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ DPNI_RSP_GET_RX_ERR_QUEUE(cmd, attr);
++
++ return 0;
++}
++
++int dpni_set_tx_conf_revoke(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ int revoke)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_TX_CONF_REVOKE,
++ cmd_flags,
++ token);
++ DPNI_CMD_SET_TX_CONF_REVOKE(cmd, revoke);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpni_set_qos_table(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ const struct dpni_qos_tbl_cfg *cfg)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_QOS_TBL,
++ cmd_flags,
++ token);
++ DPNI_CMD_SET_QOS_TABLE(cmd, cfg);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpni_add_qos_entry(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ const struct dpni_rule_cfg *cfg,
++ uint8_t tc_id)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_ADD_QOS_ENT,
++ cmd_flags,
++ token);
++ DPNI_CMD_ADD_QOS_ENTRY(cmd, cfg, tc_id);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpni_remove_qos_entry(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ const struct dpni_rule_cfg *cfg)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_REMOVE_QOS_ENT,
++ cmd_flags,
++ token);
++ DPNI_CMD_REMOVE_QOS_ENTRY(cmd, cfg);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpni_clear_qos_table(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_CLR_QOS_TBL,
++ cmd_flags,
++ token);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpni_add_fs_entry(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t tc_id,
++ const struct dpni_rule_cfg *cfg,
++ uint16_t flow_id)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_ADD_FS_ENT,
++ cmd_flags,
++ token);
++ DPNI_CMD_ADD_FS_ENTRY(cmd, tc_id, cfg, flow_id);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpni_remove_fs_entry(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t tc_id,
++ const struct dpni_rule_cfg *cfg)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_REMOVE_FS_ENT,
++ cmd_flags,
++ token);
++ DPNI_CMD_REMOVE_FS_ENTRY(cmd, tc_id, cfg);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpni_clear_fs_entries(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t tc_id)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_CLR_FS_ENT,
++ cmd_flags,
++ token);
++ DPNI_CMD_CLEAR_FS_ENTRIES(cmd, tc_id);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpni_set_vlan_insertion(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ int en)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_VLAN_INSERTION,
++ cmd_flags, token);
++ DPNI_CMD_SET_VLAN_INSERTION(cmd, en);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpni_set_vlan_removal(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ int en)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_VLAN_REMOVAL,
++ cmd_flags, token);
++ DPNI_CMD_SET_VLAN_REMOVAL(cmd, en);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpni_set_ipr(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ int en)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_IPR,
++ cmd_flags,
++ token);
++ DPNI_CMD_SET_IPR(cmd, en);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpni_set_ipf(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ int en)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_IPF,
++ cmd_flags,
++ token);
++ DPNI_CMD_SET_IPF(cmd, en);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpni_set_rx_tc_policing(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t tc_id,
++ const struct dpni_rx_tc_policing_cfg *cfg)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_RX_TC_POLICING,
++ cmd_flags,
++ token);
++ DPNI_CMD_SET_RX_TC_POLICING(cmd, tc_id, cfg);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpni_get_rx_tc_policing(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t tc_id,
++ struct dpni_rx_tc_policing_cfg *cfg)
++{
++ struct mc_command cmd = { 0 };
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_RX_TC_POLICING,
++ cmd_flags,
++ token);
++ DPNI_CMD_GET_RX_TC_POLICING(cmd, tc_id);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ DPNI_RSP_GET_RX_TC_POLICING(cmd, cfg);
++
++ return 0;
++}
++
++void dpni_prepare_early_drop(const struct dpni_early_drop_cfg *cfg,
++ uint8_t *early_drop_buf)
++{
++ uint64_t *ext_params = (uint64_t *)early_drop_buf;
++
++ DPNI_PREP_EARLY_DROP(ext_params, cfg);
++}
++
++void dpni_extract_early_drop(struct dpni_early_drop_cfg *cfg,
++ const uint8_t *early_drop_buf)
++{
++ const uint64_t *ext_params = (const uint64_t *)early_drop_buf;
++
++ DPNI_EXT_EARLY_DROP(ext_params, cfg);
++}
++
++int dpni_set_rx_tc_early_drop(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t tc_id,
++ uint64_t early_drop_iova)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_RX_TC_EARLY_DROP,
++ cmd_flags,
++ token);
++ DPNI_CMD_SET_RX_TC_EARLY_DROP(cmd, tc_id, early_drop_iova);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpni_get_rx_tc_early_drop(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t tc_id,
++ uint64_t early_drop_iova)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_RX_TC_EARLY_DROP,
++ cmd_flags,
++ token);
++ DPNI_CMD_GET_RX_TC_EARLY_DROP(cmd, tc_id, early_drop_iova);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpni_set_tx_tc_early_drop(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t tc_id,
++ uint64_t early_drop_iova)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_TX_TC_EARLY_DROP,
++ cmd_flags,
++ token);
++ DPNI_CMD_SET_TX_TC_EARLY_DROP(cmd, tc_id, early_drop_iova);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpni_get_tx_tc_early_drop(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t tc_id,
++ uint64_t early_drop_iova)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_TX_TC_EARLY_DROP,
++ cmd_flags,
++ token);
++ DPNI_CMD_GET_TX_TC_EARLY_DROP(cmd, tc_id, early_drop_iova);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpni_set_rx_tc_congestion_notification(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t tc_id,
++ const struct dpni_congestion_notification_cfg *cfg)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(
++ DPNI_CMDID_SET_RX_TC_CONGESTION_NOTIFICATION,
++ cmd_flags,
++ token);
++ DPNI_CMD_SET_RX_TC_CONGESTION_NOTIFICATION(cmd, tc_id, cfg);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpni_get_rx_tc_congestion_notification(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t tc_id,
++ struct dpni_congestion_notification_cfg *cfg)
++{
++ struct mc_command cmd = { 0 };
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(
++ DPNI_CMDID_GET_RX_TC_CONGESTION_NOTIFICATION,
++ cmd_flags,
++ token);
++ DPNI_CMD_GET_RX_TC_CONGESTION_NOTIFICATION(cmd, tc_id);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ DPNI_RSP_GET_RX_TC_CONGESTION_NOTIFICATION(cmd, cfg);
++
++ return 0;
++}
++
++int dpni_set_tx_tc_congestion_notification(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t tc_id,
++ const struct dpni_congestion_notification_cfg *cfg)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(
++ DPNI_CMDID_SET_TX_TC_CONGESTION_NOTIFICATION,
++ cmd_flags,
++ token);
++ DPNI_CMD_SET_TX_TC_CONGESTION_NOTIFICATION(cmd, tc_id, cfg);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpni_get_tx_tc_congestion_notification(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t tc_id,
++ struct dpni_congestion_notification_cfg *cfg)
++{
++ struct mc_command cmd = { 0 };
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(
++ DPNI_CMDID_GET_TX_TC_CONGESTION_NOTIFICATION,
++ cmd_flags,
++ token);
++ DPNI_CMD_GET_TX_TC_CONGESTION_NOTIFICATION(cmd, tc_id);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ DPNI_RSP_GET_TX_TC_CONGESTION_NOTIFICATION(cmd, cfg);
++
++ return 0;
++}
++
++int dpni_set_tx_conf(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint16_t flow_id,
++ const struct dpni_tx_conf_cfg *cfg)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_TX_CONF,
++ cmd_flags,
++ token);
++ DPNI_CMD_SET_TX_CONF(cmd, flow_id, cfg);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpni_get_tx_conf(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint16_t flow_id,
++ struct dpni_tx_conf_attr *attr)
++{
++ struct mc_command cmd = { 0 };
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_TX_CONF,
++ cmd_flags,
++ token);
++ DPNI_CMD_GET_TX_CONF(cmd, flow_id);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ DPNI_RSP_GET_TX_CONF(cmd, attr);
++
++ return 0;
++}
++
++int dpni_set_tx_conf_congestion_notification(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint16_t flow_id,
++ const struct dpni_congestion_notification_cfg *cfg)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(
++ DPNI_CMDID_SET_TX_CONF_CONGESTION_NOTIFICATION,
++ cmd_flags,
++ token);
++ DPNI_CMD_SET_TX_CONF_CONGESTION_NOTIFICATION(cmd, flow_id, cfg);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpni_get_tx_conf_congestion_notification(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint16_t flow_id,
++ struct dpni_congestion_notification_cfg *cfg)
++{
++ struct mc_command cmd = { 0 };
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(
++ DPNI_CMDID_GET_TX_CONF_CONGESTION_NOTIFICATION,
++ cmd_flags,
++ token);
++ DPNI_CMD_GET_TX_CONF_CONGESTION_NOTIFICATION(cmd, flow_id);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ DPNI_RSP_GET_TX_CONF_CONGESTION_NOTIFICATION(cmd, cfg);
++
++ return 0;
++}
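All of the dpni_* wrappers above follow the same prepare/send/decode pattern around mc_send_command(). Purely as an illustrative sketch (not part of the patch itself), a caller that already holds an MC portal and a DPNI token from dpni_open() could drive a few of these calls as below. cmd_flags is passed as 0 for brevity; the header name fsl_dpni.h, the DPNI_CNT_ING_FRAME counter id and the state.up field are assumptions based on the matching fsl_dpni.h header, which is not shown in this hunk.

/* Illustrative sketch only -- not part of the patch above.
 * Assumes an fsl_mc_io portal and a DPNI token obtained via dpni_open()
 * elsewhere, and that fsl_dpni.h provides struct dpni_link_state.up and
 * the DPNI_CNT_ING_FRAME counter id (assumptions, not shown in this hunk).
 */
#include <stdint.h>
#include <fsl_mc_sys.h>
#include <fsl_dpni.h>

static int dpni_example_config(struct fsl_mc_io *mc_io, uint16_t token)
{
	struct dpni_link_state state = { 0 };
	uint64_t rx_frames = 0;
	int err;

	/* enable unicast promiscuous mode and raise the max frame length */
	err = dpni_set_unicast_promisc(mc_io, 0 /* cmd_flags */, token, 1);
	if (err)
		return err;

	err = dpni_set_max_frame_length(mc_io, 0, token, 9018);
	if (err)
		return err;

	/* query link state, then read one ingress counter if the link is up */
	err = dpni_get_link_state(mc_io, 0, token, &state);
	if (err)
		return err;

	if (state.up)	/* 'up' field assumed from fsl_dpni.h */
		err = dpni_get_counter(mc_io, 0, token,
				       DPNI_CNT_ING_FRAME, &rx_frames);

	return err;
}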
+diff --git a/drivers/net/dpaa2/mc/dprc.c b/drivers/net/dpaa2/mc/dprc.c
+new file mode 100644
+index 0000000..75c6a68
+--- /dev/null
++++ b/drivers/net/dpaa2/mc/dprc.c
+@@ -0,0 +1,786 @@
++/* Copyright 2013-2015 Freescale Semiconductor Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of the above-listed copyright holders nor the
++ * names of any contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
++ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
++ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
++ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
++ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
++ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
++ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
++ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
++ * POSSIBILITY OF SUCH DAMAGE.
++ */
++#include <fsl_mc_sys.h>
++#include <fsl_mc_cmd.h>
++#include <fsl_dprc.h>
++#include <fsl_dprc_cmd.h>
++
++int dprc_get_container_id(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ int *container_id)
++{
++ struct mc_command cmd = { 0 };
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_CONT_ID,
++ cmd_flags,
++ 0);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ DPRC_RSP_GET_CONTAINER_ID(cmd, *container_id);
++
++ return 0;
++}
++
++int dprc_open(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ int container_id,
++ uint16_t *token)
++{
++ struct mc_command cmd = { 0 };
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPRC_CMDID_OPEN, cmd_flags,
++ 0);
++ DPRC_CMD_OPEN(cmd, container_id);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ *token = MC_CMD_HDR_READ_TOKEN(cmd.header);
++
++ return 0;
++}
++
++int dprc_close(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPRC_CMDID_CLOSE, cmd_flags,
++ token);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dprc_create_container(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ struct dprc_cfg *cfg,
++ int *child_container_id,
++ uint64_t *child_portal_paddr)
++{
++ struct mc_command cmd = { 0 };
++ int err;
++
++ /* prepare command */
++ DPRC_CMD_CREATE_CONTAINER(cmd, cfg);
++
++ cmd.header = mc_encode_cmd_header(DPRC_CMDID_CREATE_CONT,
++ cmd_flags,
++ token);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ DPRC_RSP_CREATE_CONTAINER(cmd, *child_container_id,
++ *child_portal_paddr);
++
++ return 0;
++}
++
++int dprc_destroy_container(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ int child_container_id)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPRC_CMDID_DESTROY_CONT,
++ cmd_flags,
++ token);
++ DPRC_CMD_DESTROY_CONTAINER(cmd, child_container_id);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dprc_reset_container(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ int child_container_id)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPRC_CMDID_RESET_CONT,
++ cmd_flags,
++ token);
++ DPRC_CMD_RESET_CONTAINER(cmd, child_container_id);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dprc_get_irq(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ int *type,
++ struct dprc_irq_cfg *irq_cfg)
++{
++ struct mc_command cmd = { 0 };
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_IRQ,
++ cmd_flags,
++ token);
++ DPRC_CMD_GET_IRQ(cmd, irq_index);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ DPRC_RSP_GET_IRQ(cmd, *type, irq_cfg);
++
++ return 0;
++}
++
++int dprc_set_irq(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ struct dprc_irq_cfg *irq_cfg)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPRC_CMDID_SET_IRQ,
++ cmd_flags,
++ token);
++ DPRC_CMD_SET_IRQ(cmd, irq_index, irq_cfg);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dprc_get_irq_enable(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ uint8_t *en)
++{
++ struct mc_command cmd = { 0 };
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_IRQ_ENABLE,
++ cmd_flags,
++ token);
++ DPRC_CMD_GET_IRQ_ENABLE(cmd, irq_index);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ DPRC_RSP_GET_IRQ_ENABLE(cmd, *en);
++
++ return 0;
++}
++
++int dprc_set_irq_enable(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ uint8_t en)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPRC_CMDID_SET_IRQ_ENABLE,
++ cmd_flags,
++ token);
++ DPRC_CMD_SET_IRQ_ENABLE(cmd, irq_index, en);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dprc_get_irq_mask(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ uint32_t *mask)
++{
++ struct mc_command cmd = { 0 };
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_IRQ_MASK,
++ cmd_flags,
++ token);
++ DPRC_CMD_GET_IRQ_MASK(cmd, irq_index);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ DPRC_RSP_GET_IRQ_MASK(cmd, *mask);
++
++ return 0;
++}
++
++int dprc_set_irq_mask(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ uint32_t mask)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPRC_CMDID_SET_IRQ_MASK,
++ cmd_flags,
++ token);
++ DPRC_CMD_SET_IRQ_MASK(cmd, irq_index, mask);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dprc_get_irq_status(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ uint32_t *status)
++{
++ struct mc_command cmd = { 0 };
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_IRQ_STATUS,
++ cmd_flags,
++ token);
++ DPRC_CMD_GET_IRQ_STATUS(cmd, irq_index, *status);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ DPRC_RSP_GET_IRQ_STATUS(cmd, *status);
++
++ return 0;
++}
++
++int dprc_clear_irq_status(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ uint32_t status)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPRC_CMDID_CLEAR_IRQ_STATUS,
++ cmd_flags,
++ token);
++ DPRC_CMD_CLEAR_IRQ_STATUS(cmd, irq_index, status);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dprc_get_attributes(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ struct dprc_attributes *attr)
++{
++ struct mc_command cmd = { 0 };
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_ATTR,
++ cmd_flags,
++ token);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ DPRC_RSP_GET_ATTRIBUTES(cmd, attr);
++
++ return 0;
++}
++
++int dprc_set_res_quota(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ int child_container_id,
++ char *type,
++ uint16_t quota)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPRC_CMDID_SET_RES_QUOTA,
++ cmd_flags,
++ token);
++ DPRC_CMD_SET_RES_QUOTA(cmd, child_container_id, type, quota);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dprc_get_res_quota(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ int child_container_id,
++ char *type,
++ uint16_t *quota)
++{
++ struct mc_command cmd = { 0 };
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_RES_QUOTA,
++ cmd_flags,
++ token);
++ DPRC_CMD_GET_RES_QUOTA(cmd, child_container_id, type);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ DPRC_RSP_GET_RES_QUOTA(cmd, *quota);
++
++ return 0;
++}
++
++int dprc_assign(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ int container_id,
++ struct dprc_res_req *res_req)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPRC_CMDID_ASSIGN,
++ cmd_flags,
++ token);
++ DPRC_CMD_ASSIGN(cmd, container_id, res_req);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dprc_unassign(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ int child_container_id,
++ struct dprc_res_req *res_req)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPRC_CMDID_UNASSIGN,
++ cmd_flags,
++ token);
++ DPRC_CMD_UNASSIGN(cmd, child_container_id, res_req);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dprc_get_pool_count(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ int *pool_count)
++{
++ struct mc_command cmd = { 0 };
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_POOL_COUNT,
++ cmd_flags,
++ token);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ DPRC_RSP_GET_POOL_COUNT(cmd, *pool_count);
++
++ return 0;
++}
++
++int dprc_get_pool(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ int pool_index,
++ char *type)
++{
++ struct mc_command cmd = { 0 };
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_POOL,
++ cmd_flags,
++ token);
++ DPRC_CMD_GET_POOL(cmd, pool_index);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ DPRC_RSP_GET_POOL(cmd, type);
++
++ return 0;
++}
++
++int dprc_get_obj_count(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ int *obj_count)
++{
++ struct mc_command cmd = { 0 };
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_OBJ_COUNT,
++ cmd_flags,
++ token);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ DPRC_RSP_GET_OBJ_COUNT(cmd, *obj_count);
++
++ return 0;
++}
++
++int dprc_get_obj(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ int obj_index,
++ struct dprc_obj_desc *obj_desc)
++{
++ struct mc_command cmd = { 0 };
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_OBJ,
++ cmd_flags,
++ token);
++ DPRC_CMD_GET_OBJ(cmd, obj_index);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ DPRC_RSP_GET_OBJ(cmd, obj_desc);
++
++ return 0;
++}
++
++int dprc_get_obj_desc(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ char *obj_type,
++ int obj_id,
++ struct dprc_obj_desc *obj_desc)
++{
++ struct mc_command cmd = { 0 };
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_OBJ_DESC,
++ cmd_flags,
++ token);
++ DPRC_CMD_GET_OBJ_DESC(cmd, obj_type, obj_id);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ DPRC_RSP_GET_OBJ_DESC(cmd, obj_desc);
++
++ return 0;
++}
++
++int dprc_set_obj_irq(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ char *obj_type,
++ int obj_id,
++ uint8_t irq_index,
++ struct dprc_irq_cfg *irq_cfg)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPRC_CMDID_SET_OBJ_IRQ,
++ cmd_flags,
++ token);
++ DPRC_CMD_SET_OBJ_IRQ(cmd,
++ obj_type,
++ obj_id,
++ irq_index,
++ irq_cfg);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dprc_get_obj_irq(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ char *obj_type,
++ int obj_id,
++ uint8_t irq_index,
++ int *type,
++ struct dprc_irq_cfg *irq_cfg)
++{
++ struct mc_command cmd = { 0 };
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_OBJ_IRQ,
++ cmd_flags,
++ token);
++ DPRC_CMD_GET_OBJ_IRQ(cmd, obj_type, obj_id, irq_index);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ DPRC_RSP_GET_OBJ_IRQ(cmd, *type, irq_cfg);
++
++ return 0;
++}
++
++int dprc_get_res_count(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ char *type,
++ int *res_count)
++{
++ struct mc_command cmd = { 0 };
++ int err;
++
++ *res_count = 0;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_RES_COUNT,
++ cmd_flags,
++ token);
++ DPRC_CMD_GET_RES_COUNT(cmd, type);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ DPRC_RSP_GET_RES_COUNT(cmd, *res_count);
++
++ return 0;
++}
++
++int dprc_get_res_ids(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ char *type,
++ struct dprc_res_ids_range_desc *range_desc)
++{
++ struct mc_command cmd = { 0 };
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_RES_IDS,
++ cmd_flags,
++ token);
++ DPRC_CMD_GET_RES_IDS(cmd, range_desc, type);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ DPRC_RSP_GET_RES_IDS(cmd, range_desc);
++
++ return 0;
++}
++
++int dprc_get_obj_region(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ char *obj_type,
++ int obj_id,
++ uint8_t region_index,
++ struct dprc_region_desc *region_desc)
++{
++ struct mc_command cmd = { 0 };
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_OBJ_REG,
++ cmd_flags,
++ token);
++ DPRC_CMD_GET_OBJ_REGION(cmd, obj_type, obj_id, region_index);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ DPRC_RSP_GET_OBJ_REGION(cmd, region_desc);
++
++ return 0;
++}
++
++int dprc_set_obj_label(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ char *obj_type,
++ int obj_id,
++ char *label)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPRC_CMDID_SET_OBJ_LABEL,
++ cmd_flags,
++ token);
++ DPRC_CMD_SET_OBJ_LABEL(cmd, obj_type, obj_id, label);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dprc_connect(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ const struct dprc_endpoint *endpoint1,
++ const struct dprc_endpoint *endpoint2,
++ const struct dprc_connection_cfg *cfg)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPRC_CMDID_CONNECT,
++ cmd_flags,
++ token);
++ DPRC_CMD_CONNECT(cmd, endpoint1, endpoint2, cfg);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dprc_disconnect(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ const struct dprc_endpoint *endpoint)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPRC_CMDID_DISCONNECT,
++ cmd_flags,
++ token);
++ DPRC_CMD_DISCONNECT(cmd, endpoint);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dprc_get_connection(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ const struct dprc_endpoint *endpoint1,
++ struct dprc_endpoint *endpoint2,
++ int *state)
++{
++ struct mc_command cmd = { 0 };
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_CONNECTION,
++ cmd_flags,
++ token);
++ DPRC_CMD_GET_CONNECTION(cmd, endpoint1);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ DPRC_RSP_GET_CONNECTION(cmd, endpoint2, *state);
++
++ return 0;
++}
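The dprc_* wrappers above are what a management application uses to enumerate a DPAA2 container. As an illustrative sketch only (not part of the patch), the usual sequence is dprc_get_container_id() -> dprc_open() -> dprc_get_obj_count() -> dprc_get_obj() in a loop -> dprc_close(); the 'type' and 'id' fields of struct dprc_obj_desc are taken from fsl_dprc.h and the MC portal is assumed to be mapped already.

/* Illustrative sketch only -- not part of the patch above.
 * Assumes the fsl_mc_io portal is already mapped, and that
 * struct dprc_obj_desc exposes 'type' and 'id' fields (per fsl_dprc.h).
 */
#include <stdio.h>
#include <stdint.h>
#include <fsl_mc_sys.h>
#include <fsl_dprc.h>

static int dprc_example_walk(struct fsl_mc_io *mc_io)
{
	struct dprc_obj_desc desc;
	uint16_t token;
	int container_id, obj_count, i, err;

	err = dprc_get_container_id(mc_io, 0 /* cmd_flags */, &container_id);
	if (err)
		return err;

	err = dprc_open(mc_io, 0, container_id, &token);
	if (err)
		return err;

	err = dprc_get_obj_count(mc_io, 0, token, &obj_count);
	if (err)
		goto out;

	/* list every object (dpni, dpbp, dpio, ...) in this container */
	for (i = 0; i < obj_count; i++) {
		err = dprc_get_obj(mc_io, 0, token, i, &desc);
		if (err)
			goto out;
		printf("obj %d: %s.%d\n", i, desc.type, desc.id);
	}

out:
	dprc_close(mc_io, 0, token);
	return err;
}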
+diff --git a/drivers/net/dpaa2/mc/dprtc.c b/drivers/net/dpaa2/mc/dprtc.c
+new file mode 100644
+index 0000000..73667af
+--- /dev/null
++++ b/drivers/net/dpaa2/mc/dprtc.c
+@@ -0,0 +1,509 @@
++/* Copyright 2013-2015 Freescale Semiconductor Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of the above-listed copyright holders nor the
++ * names of any contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
++ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
++ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
++ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
++ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
++ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
++ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
++ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
++ * POSSIBILITY OF SUCH DAMAGE.
++ */
++#include <fsl_mc_sys.h>
++#include <fsl_mc_cmd.h>
++#include <fsl_dprtc.h>
++#include <fsl_dprtc_cmd.h>
++
++int dprtc_open(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ int dprtc_id,
++ uint16_t *token)
++{
++ struct mc_command cmd = { 0 };
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPRTC_CMDID_OPEN,
++ cmd_flags,
++ 0);
++ DPRTC_CMD_OPEN(cmd, dprtc_id);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ *token = MC_CMD_HDR_READ_TOKEN(cmd.header);
++
++ return err;
++}
++
++int dprtc_close(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPRTC_CMDID_CLOSE, cmd_flags,
++ token);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dprtc_create(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ const struct dprtc_cfg *cfg,
++ uint16_t *token)
++{
++ struct mc_command cmd = { 0 };
++ int err;
++
++ (void)(cfg); /* unused */
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPRTC_CMDID_CREATE,
++ cmd_flags,
++ 0);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ *token = MC_CMD_HDR_READ_TOKEN(cmd.header);
++
++ return 0;
++}
++
++int dprtc_destroy(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPRTC_CMDID_DESTROY,
++ cmd_flags,
++ token);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dprtc_enable(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPRTC_CMDID_ENABLE, cmd_flags,
++ token);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dprtc_disable(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPRTC_CMDID_DISABLE,
++ cmd_flags,
++ token);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dprtc_is_enabled(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ int *en)
++{
++ struct mc_command cmd = { 0 };
++ int err;
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPRTC_CMDID_IS_ENABLED, cmd_flags,
++ token);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ DPRTC_RSP_IS_ENABLED(cmd, *en);
++
++ return 0;
++}
++
++int dprtc_reset(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPRTC_CMDID_RESET,
++ cmd_flags,
++ token);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dprtc_set_irq(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ struct dprtc_irq_cfg *irq_cfg)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPRTC_CMDID_SET_IRQ,
++ cmd_flags,
++ token);
++
++ DPRTC_CMD_SET_IRQ(cmd, irq_index, irq_cfg);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dprtc_get_irq(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ int *type,
++ struct dprtc_irq_cfg *irq_cfg)
++{
++ struct mc_command cmd = { 0 };
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPRTC_CMDID_GET_IRQ,
++ cmd_flags,
++ token);
++
++ DPRTC_CMD_GET_IRQ(cmd, irq_index);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ DPRTC_RSP_GET_IRQ(cmd, *type, irq_cfg);
++
++ return 0;
++}
++
++int dprtc_set_irq_enable(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ uint8_t en)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPRTC_CMDID_SET_IRQ_ENABLE,
++ cmd_flags,
++ token);
++
++ DPRTC_CMD_SET_IRQ_ENABLE(cmd, irq_index, en);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dprtc_get_irq_enable(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ uint8_t *en)
++{
++ struct mc_command cmd = { 0 };
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPRTC_CMDID_GET_IRQ_ENABLE,
++ cmd_flags,
++ token);
++
++ DPRTC_CMD_GET_IRQ_ENABLE(cmd, irq_index);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ DPRTC_RSP_GET_IRQ_ENABLE(cmd, *en);
++
++ return 0;
++}
++
++int dprtc_set_irq_mask(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ uint32_t mask)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPRTC_CMDID_SET_IRQ_MASK,
++ cmd_flags,
++ token);
++
++ DPRTC_CMD_SET_IRQ_MASK(cmd, irq_index, mask);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dprtc_get_irq_mask(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ uint32_t *mask)
++{
++ struct mc_command cmd = { 0 };
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPRTC_CMDID_GET_IRQ_MASK,
++ cmd_flags,
++ token);
++
++ DPRTC_CMD_GET_IRQ_MASK(cmd, irq_index);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ DPRTC_RSP_GET_IRQ_MASK(cmd, *mask);
++
++ return 0;
++}
++
++int dprtc_get_irq_status(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ uint32_t *status)
++{
++ struct mc_command cmd = { 0 };
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPRTC_CMDID_GET_IRQ_STATUS,
++ cmd_flags,
++ token);
++
++ DPRTC_CMD_GET_IRQ_STATUS(cmd, irq_index, *status);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ DPRTC_RSP_GET_IRQ_STATUS(cmd, *status);
++
++ return 0;
++}
++
++int dprtc_clear_irq_status(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ uint32_t status)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPRTC_CMDID_CLEAR_IRQ_STATUS,
++ cmd_flags,
++ token);
++
++ DPRTC_CMD_CLEAR_IRQ_STATUS(cmd, irq_index, status);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dprtc_get_attributes(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ struct dprtc_attr *attr)
++{
++ struct mc_command cmd = { 0 };
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPRTC_CMDID_GET_ATTR,
++ cmd_flags,
++ token);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ DPRTC_RSP_GET_ATTRIBUTES(cmd, attr);
++
++ return 0;
++}
++
++int dprtc_set_clock_offset(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ int64_t offset)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPRTC_CMDID_SET_CLOCK_OFFSET,
++ cmd_flags,
++ token);
++
++ DPRTC_CMD_SET_CLOCK_OFFSET(cmd, offset);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dprtc_set_freq_compensation(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint32_t freq_compensation)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPRTC_CMDID_SET_FREQ_COMPENSATION,
++ cmd_flags,
++ token);
++
++ DPRTC_CMD_SET_FREQ_COMPENSATION(cmd, freq_compensation);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dprtc_get_freq_compensation(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint32_t *freq_compensation)
++{
++ struct mc_command cmd = { 0 };
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPRTC_CMDID_GET_FREQ_COMPENSATION,
++ cmd_flags,
++ token);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ DPRTC_RSP_GET_FREQ_COMPENSATION(cmd, *freq_compensation);
++
++ return 0;
++}
++
++int dprtc_get_time(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint64_t *time)
++{
++ struct mc_command cmd = { 0 };
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPRTC_CMDID_GET_TIME,
++ cmd_flags,
++ token);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ DPRTC_RSP_GET_TIME(cmd, *time);
++
++ return 0;
++}
++
++int dprtc_set_time(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint64_t time)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPRTC_CMDID_SET_TIME,
++ cmd_flags,
++ token);
++
++ DPRTC_CMD_SET_TIME(cmd, time);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dprtc_set_alarm(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token, uint64_t time)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPRTC_CMDID_SET_ALARM,
++ cmd_flags,
++ token);
++
++ DPRTC_CMD_SET_ALARM(cmd, time);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
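The dprtc_* wrappers above expose the DPAA2 real-time clock. A minimal sketch of how a caller might read and adjust it is given below (not part of the patch): the dprtc object id is supplied by the caller, typically discovered through its parent DPRC, the clock is assumed to count nanoseconds, and the one-second step and -500 ns slew are placeholder values.

/* Illustrative sketch only -- not part of the patch above.
 * The dprtc object id comes from the caller (normally discovered via the
 * parent DPRC); nanosecond units and the adjustment values are assumptions.
 */
#include <stdint.h>
#include <fsl_mc_sys.h>
#include <fsl_dprtc.h>

static int dprtc_example_adjust(struct fsl_mc_io *mc_io, int dprtc_id)
{
	uint64_t now = 0;
	uint16_t token;
	int err;

	err = dprtc_open(mc_io, 0 /* cmd_flags */, dprtc_id, &token);
	if (err)
		return err;

	/* read the current counter value */
	err = dprtc_get_time(mc_io, 0, token, &now);
	if (err)
		goto out;

	/* step the clock forward by one second ... */
	err = dprtc_set_time(mc_io, 0, token, now + 1000000000ULL);
	if (err)
		goto out;

	/* ... or slew it with a signed offset instead */
	err = dprtc_set_clock_offset(mc_io, 0, token, -500);

out:
	dprtc_close(mc_io, 0, token);
	return err;
}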
+diff --git a/drivers/net/dpaa2/mc/dpseci.c b/drivers/net/dpaa2/mc/dpseci.c
+new file mode 100644
+index 0000000..a4b932a
+--- /dev/null
++++ b/drivers/net/dpaa2/mc/dpseci.c
+@@ -0,0 +1,502 @@
++/* Copyright 2013-2015 Freescale Semiconductor Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of the above-listed copyright holders nor the
++ * names of any contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
++ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
++ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
++ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
++ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
++ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
++ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
++ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
++ * POSSIBILITY OF SUCH DAMAGE.
++ */
++#include <fsl_mc_sys.h>
++#include <fsl_mc_cmd.h>
++#include <fsl_dpseci.h>
++#include <fsl_dpseci_cmd.h>
++
++int dpseci_open(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ int dpseci_id,
++ uint16_t *token)
++{
++ struct mc_command cmd = { 0 };
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_OPEN,
++ cmd_flags,
++ 0);
++ DPSECI_CMD_OPEN(cmd, dpseci_id);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ *token = MC_CMD_HDR_READ_TOKEN(cmd.header);
++
++ return 0;
++}
++
++int dpseci_close(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_CLOSE,
++ cmd_flags,
++ token);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpseci_create(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ const struct dpseci_cfg *cfg,
++ uint16_t *token)
++{
++ struct mc_command cmd = { 0 };
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_CREATE,
++ cmd_flags,
++ 0);
++ DPSECI_CMD_CREATE(cmd, cfg);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ *token = MC_CMD_HDR_READ_TOKEN(cmd.header);
++
++ return 0;
++}
++
++int dpseci_destroy(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_DESTROY,
++ cmd_flags,
++ token);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpseci_enable(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_ENABLE,
++ cmd_flags,
++ token);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpseci_disable(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_DISABLE,
++ cmd_flags,
++ token);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpseci_is_enabled(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ int *en)
++{
++ struct mc_command cmd = { 0 };
++ int err;
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_IS_ENABLED,
++ cmd_flags,
++ token);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ DPSECI_RSP_IS_ENABLED(cmd, *en);
++
++ return 0;
++}
++
++int dpseci_reset(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_RESET,
++ cmd_flags,
++ token);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpseci_get_irq(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ int *type,
++ struct dpseci_irq_cfg *irq_cfg)
++{
++ struct mc_command cmd = { 0 };
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_IRQ,
++ cmd_flags,
++ token);
++ DPSECI_CMD_GET_IRQ(cmd, irq_index);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ DPSECI_RSP_GET_IRQ(cmd, *type, irq_cfg);
++
++ return 0;
++}
++
++int dpseci_set_irq(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ struct dpseci_irq_cfg *irq_cfg)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_SET_IRQ,
++ cmd_flags,
++ token);
++ DPSECI_CMD_SET_IRQ(cmd, irq_index, irq_cfg);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpseci_get_irq_enable(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ uint8_t *en)
++{
++ struct mc_command cmd = { 0 };
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_IRQ_ENABLE,
++ cmd_flags,
++ token);
++ DPSECI_CMD_GET_IRQ_ENABLE(cmd, irq_index);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ DPSECI_RSP_GET_IRQ_ENABLE(cmd, *en);
++
++ return 0;
++}
++
++int dpseci_set_irq_enable(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ uint8_t en)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_SET_IRQ_ENABLE,
++ cmd_flags,
++ token);
++ DPSECI_CMD_SET_IRQ_ENABLE(cmd, irq_index, en);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpseci_get_irq_mask(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ uint32_t *mask)
++{
++ struct mc_command cmd = { 0 };
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_IRQ_MASK,
++ cmd_flags,
++ token);
++ DPSECI_CMD_GET_IRQ_MASK(cmd, irq_index);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ DPSECI_RSP_GET_IRQ_MASK(cmd, *mask);
++
++ return 0;
++}
++
++int dpseci_set_irq_mask(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ uint32_t mask)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_SET_IRQ_MASK,
++ cmd_flags,
++ token);
++ DPSECI_CMD_SET_IRQ_MASK(cmd, irq_index, mask);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpseci_get_irq_status(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ uint32_t *status)
++{
++ struct mc_command cmd = { 0 };
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_IRQ_STATUS,
++ cmd_flags,
++ token);
++ DPSECI_CMD_GET_IRQ_STATUS(cmd, irq_index, *status);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ DPSECI_RSP_GET_IRQ_STATUS(cmd, *status);
++
++ return 0;
++}
++
++int dpseci_clear_irq_status(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ uint32_t status)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_CLEAR_IRQ_STATUS,
++ cmd_flags,
++ token);
++ DPSECI_CMD_CLEAR_IRQ_STATUS(cmd, irq_index, status);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpseci_get_attributes(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ struct dpseci_attr *attr)
++{
++ struct mc_command cmd = { 0 };
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_ATTR,
++ cmd_flags,
++ token);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ DPSECI_RSP_GET_ATTR(cmd, attr);
++
++ return 0;
++}
++
++int dpseci_set_rx_queue(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t queue,
++ const struct dpseci_rx_queue_cfg *cfg)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_SET_RX_QUEUE,
++ cmd_flags,
++ token);
++ DPSECI_CMD_SET_RX_QUEUE(cmd, queue, cfg);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpseci_get_rx_queue(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t queue,
++ struct dpseci_rx_queue_attr *attr)
++{
++ struct mc_command cmd = { 0 };
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_RX_QUEUE,
++ cmd_flags,
++ token);
++ DPSECI_CMD_GET_RX_QUEUE(cmd, queue);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ DPSECI_RSP_GET_RX_QUEUE(cmd, attr);
++
++ return 0;
++}
++
++int dpseci_get_tx_queue(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t queue,
++ struct dpseci_tx_queue_attr *attr)
++{
++ struct mc_command cmd = { 0 };
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_TX_QUEUE,
++ cmd_flags,
++ token);
++ DPSECI_CMD_GET_TX_QUEUE(cmd, queue);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ DPSECI_RSP_GET_TX_QUEUE(cmd, attr);
++
++ return 0;
++}
++
++int dpseci_get_sec_attr(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ struct dpseci_sec_attr *attr)
++{
++ struct mc_command cmd = { 0 };
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_SEC_ATTR,
++ cmd_flags,
++ token);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ DPSECI_RSP_GET_SEC_ATTR(cmd, attr);
++
++ return 0;
++}
++
++int dpseci_get_sec_counters(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ struct dpseci_sec_counters *counters)
++{
++ struct mc_command cmd = { 0 };
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_SEC_COUNTERS,
++ cmd_flags,
++ token);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ DPSECI_RSP_GET_SEC_COUNTERS(cmd, counters);
++
++ return 0;
++}
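++
++/*
++ * Illustrative usage sketch (hypothetical helper added for clarity, not part
++ * of the MC flib API): a minimal open -> enable -> query -> close sequence
++ * built only from the dpseci_* calls defined above.  'mc_io' is an already
++ * initialised MC portal and 'dpseci_id' comes from the caller; cmd_flags is
++ * left at 0, i.e. no 'MC_CMD_FLAG_' options are requested.
++ */
++static inline int dpseci_example_probe(struct fsl_mc_io *mc_io, int dpseci_id)
++{
++	struct dpseci_attr attr;
++	uint16_t token;
++	int err;
++
++	/* open a control session and obtain the authentication token */
++	err = dpseci_open(mc_io, 0, dpseci_id, &token);
++	if (err)
++		return err;
++
++	/* enable the object, then read back its attributes */
++	err = dpseci_enable(mc_io, 0, token);
++	if (!err)
++		err = dpseci_get_attributes(mc_io, 0, token, &attr);
++
++	/* always release the control session, even on error */
++	dpseci_close(mc_io, 0, token);
++	return err;
++}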
+diff --git a/drivers/net/dpaa2/mc/dpsw.c b/drivers/net/dpaa2/mc/dpsw.c
+new file mode 100644
+index 0000000..2034b55
+--- /dev/null
++++ b/drivers/net/dpaa2/mc/dpsw.c
+@@ -0,0 +1,1639 @@
++/* Copyright 2013-2015 Freescale Semiconductor Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of the above-listed copyright holders nor the
++ * names of any contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
++ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
++ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
++ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
++ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
++ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
++ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
++ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
++ * POSSIBILITY OF SUCH DAMAGE.
++ */
++#include <fsl_mc_sys.h>
++#include <fsl_mc_cmd.h>
++#include <fsl_dpsw.h>
++#include <fsl_dpsw_cmd.h>
++
++/* internal functions */
++static void build_if_id_bitmap(const uint16_t *if_id,
++ const uint16_t num_ifs,
++ struct mc_command *cmd,
++ int start_param)
++{
++ int i;
++
++ for (i = 0; (i < num_ifs) && (i < DPSW_MAX_IF); i++)
++ cmd->params[start_param + (if_id[i] / 64)] |= mc_enc(
++ (if_id[i] % 64), 1, 1);
++}
++
++static int read_if_id_bitmap(uint16_t *if_id,
++ uint16_t *num_ifs,
++ struct mc_command *cmd,
++ int start_param)
++{
++ int bitmap[DPSW_MAX_IF] = { 0 };
++ int i, j = 0;
++ int count = 0;
++
++ for (i = 0; i < DPSW_MAX_IF; i++) {
++ bitmap[i] = (int)mc_dec(cmd->params[start_param + i / 64],
++ i % 64, 1);
++ count += bitmap[i];
++ }
++
++ *num_ifs = (uint16_t)count;
++
++ for (i = 0; (i < DPSW_MAX_IF) && (j < count); i++) {
++ if (bitmap[i]) {
++ if_id[j] = (uint16_t)i;
++ j++;
++ }
++ }
++
++ return 0;
++}
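++
++/*
++ * Worked example of the interface-id bitmap layout used by the two helpers
++ * above (explanatory note, not from the original sources): each 64-bit
++ * command parameter starting at 'start_param' carries one bit per interface,
++ * bit (if_id % 64) of word (if_id / 64).  For if_id = {2, 65}:
++ *
++ *	params[start_param]     gets bit 2 set  (2 / 64 == 0,  2 % 64 == 2)
++ *	params[start_param + 1] gets bit 1 set  (65 / 64 == 1, 65 % 64 == 1)
++ *
++ * read_if_id_bitmap() walks the same layout in the opposite direction and
++ * returns the recovered interface ids in ascending order.
++ */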
++
++/* DPSW APIs */
++int dpsw_open(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ int dpsw_id,
++ uint16_t *token)
++{
++ struct mc_command cmd = { 0 };
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_OPEN,
++ cmd_flags,
++ 0);
++ DPSW_CMD_OPEN(cmd, dpsw_id);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ *token = MC_CMD_HDR_READ_TOKEN(cmd.header);
++
++ return 0;
++}
++
++int dpsw_close(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_CLOSE,
++ cmd_flags,
++ token);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpsw_create(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ const struct dpsw_cfg *cfg,
++ uint16_t *token)
++{
++ struct mc_command cmd = { 0 };
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_CREATE,
++ cmd_flags,
++ 0);
++ DPSW_CMD_CREATE(cmd, cfg);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ *token = MC_CMD_HDR_READ_TOKEN(cmd.header);
++
++ return 0;
++}
++
++int dpsw_destroy(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_DESTROY,
++ cmd_flags,
++ token);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpsw_enable(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_ENABLE,
++ cmd_flags,
++ token);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpsw_disable(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_DISABLE,
++ cmd_flags,
++ token);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpsw_is_enabled(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ int *en)
++{
++ struct mc_command cmd = { 0 };
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_IS_ENABLED, cmd_flags,
++ token);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ DPSW_RSP_IS_ENABLED(cmd, *en);
++
++ return 0;
++}
++
++int dpsw_reset(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_RESET,
++ cmd_flags,
++ token);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpsw_set_irq(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ struct dpsw_irq_cfg *irq_cfg)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_SET_IRQ,
++ cmd_flags,
++ token);
++ DPSW_CMD_SET_IRQ(cmd, irq_index, irq_cfg);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpsw_get_irq(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ int *type,
++ struct dpsw_irq_cfg *irq_cfg)
++{
++ struct mc_command cmd = { 0 };
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_GET_IRQ,
++ cmd_flags,
++ token);
++ DPSW_CMD_GET_IRQ(cmd, irq_index);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ DPSW_RSP_GET_IRQ(cmd, *type, irq_cfg);
++
++ return 0;
++}
++
++int dpsw_set_irq_enable(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ uint8_t en)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_SET_IRQ_ENABLE,
++ cmd_flags,
++ token);
++ DPSW_CMD_SET_IRQ_ENABLE(cmd, irq_index, en);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpsw_get_irq_enable(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ uint8_t *en)
++{
++ struct mc_command cmd = { 0 };
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_GET_IRQ_ENABLE,
++ cmd_flags,
++ token);
++ DPSW_CMD_GET_IRQ_ENABLE(cmd, irq_index);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ DPSW_RSP_GET_IRQ_ENABLE(cmd, *en);
++
++ return 0;
++}
++
++int dpsw_set_irq_mask(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ uint32_t mask)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_SET_IRQ_MASK,
++ cmd_flags,
++ token);
++ DPSW_CMD_SET_IRQ_MASK(cmd, irq_index, mask);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpsw_get_irq_mask(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ uint32_t *mask)
++{
++ struct mc_command cmd = { 0 };
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_GET_IRQ_MASK,
++ cmd_flags,
++ token);
++ DPSW_CMD_GET_IRQ_MASK(cmd, irq_index);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ DPSW_RSP_GET_IRQ_MASK(cmd, *mask);
++
++ return 0;
++}
++
++int dpsw_get_irq_status(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ uint32_t *status)
++{
++ struct mc_command cmd = { 0 };
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_GET_IRQ_STATUS,
++ cmd_flags,
++ token);
++ DPSW_CMD_GET_IRQ_STATUS(cmd, irq_index, *status);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ DPSW_RSP_GET_IRQ_STATUS(cmd, *status);
++
++ return 0;
++}
++
++int dpsw_clear_irq_status(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ uint32_t status)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_CLEAR_IRQ_STATUS,
++ cmd_flags,
++ token);
++ DPSW_CMD_CLEAR_IRQ_STATUS(cmd, irq_index, status);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpsw_get_attributes(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ struct dpsw_attr *attr)
++{
++ struct mc_command cmd = { 0 };
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_GET_ATTR,
++ cmd_flags,
++ token);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ DPSW_RSP_GET_ATTR(cmd, attr);
++
++ return 0;
++}
++
++int dpsw_set_reflection_if(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint16_t if_id)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_SET_REFLECTION_IF,
++ cmd_flags,
++ token);
++ DPSW_CMD_SET_REFLECTION_IF(cmd, if_id);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpsw_if_set_link_cfg(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint16_t if_id,
++ struct dpsw_link_cfg *cfg)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_SET_LINK_CFG,
++ cmd_flags,
++ token);
++ DPSW_CMD_IF_SET_LINK_CFG(cmd, if_id, cfg);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpsw_if_get_link_state(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint16_t if_id,
++ struct dpsw_link_state *state)
++{
++ struct mc_command cmd = { 0 };
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_GET_LINK_STATE,
++ cmd_flags,
++ token);
++ DPSW_CMD_IF_GET_LINK_STATE(cmd, if_id);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ DPSW_RSP_IF_GET_LINK_STATE(cmd, state);
++
++ return 0;
++}
++
++int dpsw_if_set_flooding(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint16_t if_id,
++ int en)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_SET_FLOODING,
++ cmd_flags,
++ token);
++ DPSW_CMD_IF_SET_FLOODING(cmd, if_id, en);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpsw_if_set_broadcast(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint16_t if_id,
++ int en)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_SET_BROADCAST,
++ cmd_flags,
++ token);
++	/* the broadcast setting uses the same (if_id, en) payload layout as
++	 * the flooding command, so the flooding fill macro is reused here
++	 */
++	DPSW_CMD_IF_SET_FLOODING(cmd, if_id, en);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpsw_if_set_multicast(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint16_t if_id,
++ int en)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_SET_MULTICAST,
++ cmd_flags,
++ token);
++	/* the multicast setting uses the same (if_id, en) payload layout as
++	 * the flooding command, so the flooding fill macro is reused here
++	 */
++	DPSW_CMD_IF_SET_FLOODING(cmd, if_id, en);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpsw_if_set_tci(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint16_t if_id,
++ const struct dpsw_tci_cfg *cfg)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_SET_TCI,
++ cmd_flags,
++ token);
++ DPSW_CMD_IF_SET_TCI(cmd, if_id, cfg);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpsw_if_get_tci(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint16_t if_id,
++ struct dpsw_tci_cfg *cfg)
++{
++ struct mc_command cmd = { 0 };
++ int err = 0;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_GET_TCI,
++ cmd_flags,
++ token);
++ DPSW_CMD_IF_GET_TCI(cmd, if_id);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ DPSW_RSP_IF_GET_TCI(cmd, cfg);
++
++ return 0;
++}
++
++int dpsw_if_set_stp(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint16_t if_id,
++ const struct dpsw_stp_cfg *cfg)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_SET_STP,
++ cmd_flags,
++ token);
++ DPSW_CMD_IF_SET_STP(cmd, if_id, cfg);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpsw_if_set_accepted_frames(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint16_t if_id,
++ const struct dpsw_accepted_frames_cfg *cfg)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_SET_ACCEPTED_FRAMES,
++ cmd_flags,
++ token);
++ DPSW_CMD_IF_SET_ACCEPTED_FRAMES(cmd, if_id, cfg);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpsw_if_set_accept_all_vlan(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint16_t if_id,
++ int accept_all)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_SET_IF_ACCEPT_ALL_VLAN,
++ cmd_flags,
++ token);
++ DPSW_CMD_IF_SET_ACCEPT_ALL_VLAN(cmd, if_id, accept_all);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpsw_if_get_counter(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint16_t if_id,
++ enum dpsw_counter type,
++ uint64_t *counter)
++{
++ struct mc_command cmd = { 0 };
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_GET_COUNTER,
++ cmd_flags,
++ token);
++ DPSW_CMD_IF_GET_COUNTER(cmd, if_id, type);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ DPSW_RSP_IF_GET_COUNTER(cmd, *counter);
++
++ return 0;
++}
++
++int dpsw_if_set_counter(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint16_t if_id,
++ enum dpsw_counter type,
++ uint64_t counter)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_SET_COUNTER,
++ cmd_flags,
++ token);
++ DPSW_CMD_IF_SET_COUNTER(cmd, if_id, type, counter);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpsw_if_set_tx_selection(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint16_t if_id,
++ const struct dpsw_tx_selection_cfg *cfg)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_SET_TX_SELECTION,
++ cmd_flags,
++ token);
++ DPSW_CMD_IF_SET_TX_SELECTION(cmd, if_id, cfg);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpsw_if_add_reflection(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint16_t if_id,
++ const struct dpsw_reflection_cfg *cfg)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_ADD_REFLECTION,
++ cmd_flags,
++ token);
++ DPSW_CMD_IF_ADD_REFLECTION(cmd, if_id, cfg);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpsw_if_remove_reflection(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint16_t if_id,
++ const struct dpsw_reflection_cfg *cfg)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_REMOVE_REFLECTION,
++ cmd_flags,
++ token);
++ DPSW_CMD_IF_REMOVE_REFLECTION(cmd, if_id, cfg);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpsw_if_set_flooding_metering(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint16_t if_id,
++ const struct dpsw_metering_cfg *cfg)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_SET_FLOODING_METERING,
++ cmd_flags,
++ token);
++ DPSW_CMD_IF_SET_FLOODING_METERING(cmd, if_id, cfg);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpsw_if_set_metering(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint16_t if_id,
++ uint8_t tc_id,
++ const struct dpsw_metering_cfg *cfg)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_SET_METERING,
++ cmd_flags,
++ token);
++ DPSW_CMD_IF_SET_METERING(cmd, if_id, tc_id, cfg);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++void dpsw_prepare_early_drop(const struct dpsw_early_drop_cfg *cfg,
++ uint8_t *early_drop_buf)
++{
++ uint64_t *ext_params = (uint64_t *)early_drop_buf;
++
++ DPSW_PREP_EARLY_DROP(ext_params, cfg);
++}
++
++int dpsw_if_set_early_drop(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint16_t if_id,
++ uint8_t tc_id,
++ uint64_t early_drop_iova)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_SET_EARLY_DROP,
++ cmd_flags,
++ token);
++ DPSW_CMD_IF_SET_EARLY_DROP(cmd, if_id, tc_id, early_drop_iova);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
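++
++/*
++ * Usage note (editorial sketch, not from the original sources): early-drop
++ * configuration is a two-step sequence - dpsw_prepare_early_drop() above
++ * serialises the configuration into a caller-supplied buffer, and
++ * dpsw_if_set_early_drop() then hands the I/O virtual address of that
++ * buffer to the MC, e.g.
++ *
++ *	dpsw_prepare_early_drop(&cfg, early_drop_buf);
++ *	err = dpsw_if_set_early_drop(mc_io, 0, token, if_id, tc_id,
++ *				     early_drop_buf_iova);
++ *
++ * where 'early_drop_buf' is DMA-able memory and 'early_drop_buf_iova' is
++ * that buffer's address as seen by the MC.
++ */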
++
++int dpsw_add_custom_tpid(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ const struct dpsw_custom_tpid_cfg *cfg)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_ADD_CUSTOM_TPID,
++ cmd_flags,
++ token);
++ DPSW_CMD_ADD_CUSTOM_TPID(cmd, cfg);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpsw_remove_custom_tpid(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ const struct dpsw_custom_tpid_cfg *cfg)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_REMOVE_CUSTOM_TPID,
++ cmd_flags,
++ token);
++ DPSW_CMD_REMOVE_CUSTOM_TPID(cmd, cfg);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpsw_if_enable(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint16_t if_id)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_ENABLE,
++ cmd_flags,
++ token);
++ DPSW_CMD_IF_ENABLE(cmd, if_id);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpsw_if_disable(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint16_t if_id)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_DISABLE,
++ cmd_flags,
++ token);
++ DPSW_CMD_IF_DISABLE(cmd, if_id);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpsw_if_get_attributes(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint16_t if_id,
++ struct dpsw_if_attr *attr)
++{
++ struct mc_command cmd = { 0 };
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_GET_ATTR,
++ cmd_flags,
++ token);
++ DPSW_CMD_IF_GET_ATTR(cmd, if_id);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ DPSW_RSP_IF_GET_ATTR(cmd, attr);
++
++ return 0;
++}
++
++int dpsw_if_set_max_frame_length(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint16_t if_id,
++ uint16_t frame_length)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_SET_MAX_FRAME_LENGTH,
++ cmd_flags,
++ token);
++ DPSW_CMD_IF_SET_MAX_FRAME_LENGTH(cmd, if_id, frame_length);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpsw_if_get_max_frame_length(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint16_t if_id,
++ uint16_t *frame_length)
++{
++ struct mc_command cmd = { 0 };
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_GET_MAX_FRAME_LENGTH,
++ cmd_flags,
++ token);
++ DPSW_CMD_IF_GET_MAX_FRAME_LENGTH(cmd, if_id);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++	/* retrieve response parameters */
++	DPSW_RSP_IF_GET_MAX_FRAME_LENGTH(cmd, *frame_length);
++
++ return 0;
++}
++
++int dpsw_vlan_add(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint16_t vlan_id,
++ const struct dpsw_vlan_cfg *cfg)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_VLAN_ADD,
++ cmd_flags,
++ token);
++ DPSW_CMD_VLAN_ADD(cmd, vlan_id, cfg);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpsw_vlan_add_if(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint16_t vlan_id,
++ const struct dpsw_vlan_if_cfg *cfg)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ build_if_id_bitmap(cfg->if_id, cfg->num_ifs, &cmd, 1);
++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_VLAN_ADD_IF,
++ cmd_flags,
++ token);
++ DPSW_CMD_VLAN_ADD_IF(cmd, vlan_id);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpsw_vlan_add_if_untagged(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint16_t vlan_id,
++ const struct dpsw_vlan_if_cfg *cfg)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ build_if_id_bitmap(cfg->if_id, cfg->num_ifs, &cmd, 1);
++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_VLAN_ADD_IF_UNTAGGED,
++ cmd_flags,
++ token);
++ DPSW_CMD_VLAN_ADD_IF_UNTAGGED(cmd, vlan_id);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpsw_vlan_add_if_flooding(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint16_t vlan_id,
++ const struct dpsw_vlan_if_cfg *cfg)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ build_if_id_bitmap(cfg->if_id, cfg->num_ifs, &cmd, 1);
++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_VLAN_ADD_IF_FLOODING,
++ cmd_flags,
++ token);
++ DPSW_CMD_VLAN_ADD_IF_FLOODING(cmd, vlan_id);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpsw_vlan_remove_if(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint16_t vlan_id,
++ const struct dpsw_vlan_if_cfg *cfg)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ build_if_id_bitmap(cfg->if_id, cfg->num_ifs, &cmd, 1);
++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_VLAN_REMOVE_IF,
++ cmd_flags,
++ token);
++ DPSW_CMD_VLAN_REMOVE_IF(cmd, vlan_id);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpsw_vlan_remove_if_untagged(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint16_t vlan_id,
++ const struct dpsw_vlan_if_cfg *cfg)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ build_if_id_bitmap(cfg->if_id, cfg->num_ifs, &cmd, 1);
++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_VLAN_REMOVE_IF_UNTAGGED,
++ cmd_flags,
++ token);
++ DPSW_CMD_VLAN_REMOVE_IF_UNTAGGED(cmd, vlan_id);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpsw_vlan_remove_if_flooding(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint16_t vlan_id,
++ const struct dpsw_vlan_if_cfg *cfg)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ build_if_id_bitmap(cfg->if_id, cfg->num_ifs, &cmd, 1);
++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_VLAN_REMOVE_IF_FLOODING,
++ cmd_flags,
++ token);
++ DPSW_CMD_VLAN_REMOVE_IF_FLOODING(cmd, vlan_id);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpsw_vlan_remove(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint16_t vlan_id)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_VLAN_REMOVE,
++ cmd_flags,
++ token);
++ DPSW_CMD_VLAN_REMOVE(cmd, vlan_id);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpsw_vlan_get_attributes(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint16_t vlan_id,
++ struct dpsw_vlan_attr *attr)
++{
++ struct mc_command cmd = { 0 };
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_VLAN_GET_ATTRIBUTES,
++ cmd_flags,
++ token);
++ DPSW_CMD_VLAN_GET_ATTR(cmd, vlan_id);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ DPSW_RSP_VLAN_GET_ATTR(cmd, attr);
++
++ return 0;
++}
++
++int dpsw_vlan_get_if(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint16_t vlan_id,
++ struct dpsw_vlan_if_cfg *cfg)
++{
++ struct mc_command cmd = { 0 };
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_VLAN_GET_IF,
++ cmd_flags,
++ token);
++ DPSW_CMD_VLAN_GET_IF(cmd, vlan_id);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ DPSW_RSP_VLAN_GET_IF(cmd, cfg);
++ read_if_id_bitmap(cfg->if_id, &cfg->num_ifs, &cmd, 1);
++
++ return 0;
++}
++
++int dpsw_vlan_get_if_flooding(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint16_t vlan_id,
++ struct dpsw_vlan_if_cfg *cfg)
++{
++ struct mc_command cmd = { 0 };
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_VLAN_GET_IF_FLOODING,
++ cmd_flags,
++ token);
++ DPSW_CMD_VLAN_GET_IF_FLOODING(cmd, vlan_id);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ DPSW_RSP_VLAN_GET_IF_FLOODING(cmd, cfg);
++ read_if_id_bitmap(cfg->if_id, &cfg->num_ifs, &cmd, 1);
++
++ return 0;
++}
++
++int dpsw_vlan_get_if_untagged(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint16_t vlan_id,
++ struct dpsw_vlan_if_cfg *cfg)
++{
++ struct mc_command cmd = { 0 };
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_VLAN_GET_IF_UNTAGGED,
++ cmd_flags,
++ token);
++ DPSW_CMD_VLAN_GET_IF_UNTAGGED(cmd, vlan_id);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ DPSW_RSP_VLAN_GET_IF(cmd, cfg);
++ read_if_id_bitmap(cfg->if_id, &cfg->num_ifs, &cmd, 1);
++
++ return 0;
++}
++
++int dpsw_fdb_add(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint16_t *fdb_id,
++ const struct dpsw_fdb_cfg *cfg)
++{
++ struct mc_command cmd = { 0 };
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_FDB_ADD,
++ cmd_flags,
++ token);
++ DPSW_CMD_FDB_ADD(cmd, cfg);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ DPSW_RSP_FDB_ADD(cmd, *fdb_id);
++
++ return 0;
++}
++
++int dpsw_fdb_remove(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint16_t fdb_id)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_FDB_REMOVE,
++ cmd_flags,
++ token);
++ DPSW_CMD_FDB_REMOVE(cmd, fdb_id);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpsw_fdb_add_unicast(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint16_t fdb_id,
++ const struct dpsw_fdb_unicast_cfg *cfg)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_FDB_ADD_UNICAST,
++ cmd_flags,
++ token);
++ DPSW_CMD_FDB_ADD_UNICAST(cmd, fdb_id, cfg);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpsw_fdb_get_unicast(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint16_t fdb_id,
++ struct dpsw_fdb_unicast_cfg *cfg)
++{
++ struct mc_command cmd = { 0 };
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_FDB_GET_UNICAST,
++ cmd_flags,
++ token);
++ DPSW_CMD_FDB_GET_UNICAST(cmd, fdb_id);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ DPSW_RSP_FDB_GET_UNICAST(cmd, cfg);
++
++ return 0;
++}
++
++int dpsw_fdb_remove_unicast(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint16_t fdb_id,
++ const struct dpsw_fdb_unicast_cfg *cfg)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_FDB_REMOVE_UNICAST,
++ cmd_flags,
++ token);
++ DPSW_CMD_FDB_REMOVE_UNICAST(cmd, fdb_id, cfg);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpsw_fdb_add_multicast(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint16_t fdb_id,
++ const struct dpsw_fdb_multicast_cfg *cfg)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ build_if_id_bitmap(cfg->if_id, cfg->num_ifs, &cmd, 2);
++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_FDB_ADD_MULTICAST,
++ cmd_flags,
++ token);
++ DPSW_CMD_FDB_ADD_MULTICAST(cmd, fdb_id, cfg);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpsw_fdb_get_multicast(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint16_t fdb_id,
++ struct dpsw_fdb_multicast_cfg *cfg)
++{
++ struct mc_command cmd = { 0 };
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_FDB_GET_MULTICAST,
++ cmd_flags,
++ token);
++ DPSW_CMD_FDB_GET_MULTICAST(cmd, fdb_id);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ DPSW_RSP_FDB_GET_MULTICAST(cmd, cfg);
++ read_if_id_bitmap(cfg->if_id, &cfg->num_ifs, &cmd, 2);
++
++ return 0;
++}
++
++int dpsw_fdb_remove_multicast(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint16_t fdb_id,
++ const struct dpsw_fdb_multicast_cfg *cfg)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ build_if_id_bitmap(cfg->if_id, cfg->num_ifs, &cmd, 2);
++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_FDB_REMOVE_MULTICAST,
++ cmd_flags,
++ token);
++ DPSW_CMD_FDB_REMOVE_MULTICAST(cmd, fdb_id, cfg);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpsw_fdb_set_learning_mode(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint16_t fdb_id,
++ enum dpsw_fdb_learning_mode mode)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_FDB_SET_LEARNING_MODE,
++ cmd_flags,
++ token);
++ DPSW_CMD_FDB_SET_LEARNING_MODE(cmd, fdb_id, mode);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpsw_fdb_get_attributes(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint16_t fdb_id,
++ struct dpsw_fdb_attr *attr)
++{
++ struct mc_command cmd = { 0 };
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_FDB_GET_ATTR,
++ cmd_flags,
++ token);
++ DPSW_CMD_FDB_GET_ATTR(cmd, fdb_id);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ DPSW_RSP_FDB_GET_ATTR(cmd, attr);
++
++ return 0;
++}
++
++int dpsw_acl_add(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint16_t *acl_id,
++ const struct dpsw_acl_cfg *cfg)
++{
++ struct mc_command cmd = { 0 };
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_ACL_ADD,
++ cmd_flags,
++ token);
++ DPSW_CMD_ACL_ADD(cmd, cfg);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ DPSW_RSP_ACL_ADD(cmd, *acl_id);
++
++ return 0;
++}
++
++int dpsw_acl_remove(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint16_t acl_id)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_ACL_REMOVE,
++ cmd_flags,
++ token);
++ DPSW_CMD_ACL_REMOVE(cmd, acl_id);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++void dpsw_acl_prepare_entry_cfg(const struct dpsw_acl_key *key,
++ uint8_t *entry_cfg_buf)
++{
++ uint64_t *ext_params = (uint64_t *)entry_cfg_buf;
++
++ DPSW_PREP_ACL_ENTRY(ext_params, key);
++}
++
++int dpsw_acl_add_entry(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint16_t acl_id,
++ const struct dpsw_acl_entry_cfg *cfg)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_ACL_ADD_ENTRY,
++ cmd_flags,
++ token);
++ DPSW_CMD_ACL_ADD_ENTRY(cmd, acl_id, cfg);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpsw_acl_remove_entry(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint16_t acl_id,
++ const struct dpsw_acl_entry_cfg *cfg)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_ACL_REMOVE_ENTRY,
++ cmd_flags,
++ token);
++ DPSW_CMD_ACL_REMOVE_ENTRY(cmd, acl_id, cfg);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpsw_acl_add_if(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint16_t acl_id,
++ const struct dpsw_acl_if_cfg *cfg)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ build_if_id_bitmap(cfg->if_id, cfg->num_ifs, &cmd, 1);
++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_ACL_ADD_IF,
++ cmd_flags,
++ token);
++ DPSW_CMD_ACL_ADD_IF(cmd, acl_id, cfg);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpsw_acl_remove_if(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint16_t acl_id,
++ const struct dpsw_acl_if_cfg *cfg)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ build_if_id_bitmap(cfg->if_id, cfg->num_ifs, &cmd, 1);
++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_ACL_REMOVE_IF,
++ cmd_flags,
++ token);
++ DPSW_CMD_ACL_REMOVE_IF(cmd, acl_id, cfg);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpsw_acl_get_attributes(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint16_t acl_id,
++ struct dpsw_acl_attr *attr)
++{
++ struct mc_command cmd = { 0 };
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_ACL_GET_ATTR,
++ cmd_flags,
++ token);
++ DPSW_CMD_ACL_GET_ATTR(cmd, acl_id);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ DPSW_RSP_ACL_GET_ATTR(cmd, attr);
++
++ return 0;
++}
++
++int dpsw_ctrl_if_get_attributes(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ struct dpsw_ctrl_if_attr *attr)
++{
++ struct mc_command cmd = { 0 };
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_CTRL_IF_GET_ATTR,
++ cmd_flags,
++ token);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ DPSW_RSP_CTRL_IF_GET_ATTR(cmd, attr);
++
++ return 0;
++}
++
++int dpsw_ctrl_if_set_pools(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ const struct dpsw_ctrl_if_pools_cfg *pools)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_CTRL_IF_SET_POOLS,
++ cmd_flags,
++ token);
++ DPSW_CMD_CTRL_IF_SET_POOLS(cmd, pools);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpsw_ctrl_if_enable(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_CTRL_IF_ENABLE,
++ cmd_flags,
++ token);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++/**
++ * dpsw_ctrl_if_disable() - Disable the control interface
++ * @mc_io:	Pointer to MC portal's I/O object
++ * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token:	Token of DPSW object
++ *
++ * Return:	'0' on Success; Error code otherwise.
++ */
++int dpsw_ctrl_if_disable(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token)
++{
++ struct mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_CTRL_IF_DISABLE,
++ cmd_flags,
++ token);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
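++
++/*
++ * Illustrative usage sketch (hypothetical helper added for clarity, not part
++ * of the MC flib API): open a DPSW, enable the object and one of its
++ * interfaces, then close the session, using only the dpsw_* calls defined
++ * above.  cmd_flags is left at 0, i.e. no 'MC_CMD_FLAG_' options.
++ */
++static inline int dpsw_example_bring_up_if(struct fsl_mc_io *mc_io,
++					   int dpsw_id,
++					   uint16_t if_id)
++{
++	uint16_t token;
++	int err;
++
++	err = dpsw_open(mc_io, 0, dpsw_id, &token);
++	if (err)
++		return err;
++
++	err = dpsw_enable(mc_io, 0, token);
++	if (!err)
++		err = dpsw_if_enable(mc_io, 0, token, if_id);
++	if (!err)
++		err = dpsw_if_set_flooding(mc_io, 0, token, if_id, 1);
++
++	dpsw_close(mc_io, 0, token);
++	return err;
++}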
+diff --git a/drivers/net/dpaa2/mc/fsl_dpaiop.h b/drivers/net/dpaa2/mc/fsl_dpaiop.h
+new file mode 100644
+index 0000000..b039b2a
+--- /dev/null
++++ b/drivers/net/dpaa2/mc/fsl_dpaiop.h
+@@ -0,0 +1,494 @@
++/* Copyright 2013-2015 Freescale Semiconductor Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of the above-listed copyright holders nor the
++ * names of any contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
++ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
++ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
++ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
++ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
++ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
++ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
++ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
++ * POSSIBILITY OF SUCH DAMAGE.
++ */
++#ifndef __FSL_DPAIOP_H
++#define __FSL_DPAIOP_H
++
++struct fsl_mc_io;
++
++/* Data Path AIOP API
++ * Contains initialization APIs and runtime control APIs for DPAIOP
++ */
++
++/**
++ * dpaiop_open() - Open a control session for the specified object.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @dpaiop_id: DPAIOP unique ID
++ * @token: Returned token; use in subsequent API calls
++ *
++ * This function can be used to open a control session for an
++ * already created object; an object may have been declared in
++ * the DPL or by calling the dpaiop_create function.
++ * This function returns a unique authentication token,
++ * associated with the specific object ID and the specific MC
++ * portal; this token must be used in all subsequent commands for
++ * this specific object
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpaiop_open(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ int dpaiop_id,
++ uint16_t *token);
++
++/**
++ * dpaiop_close() - Close the control session of the object
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPAIOP object
++ *
++ * After this function is called, no further operations are
++ * allowed on the object without opening a new control session.
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpaiop_close(struct fsl_mc_io *mc_io, uint32_t cmd_flags, uint16_t token);
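++
++/*
++ * Illustrative call sequence (editorial sketch, not part of the original
++ * header): every runtime call below takes the token returned by
++ * dpaiop_open(), and the session is released with dpaiop_close(), e.g.
++ *
++ *	uint16_t token;
++ *	int err = dpaiop_open(mc_io, 0, dpaiop_id, &token);
++ *
++ *	if (!err) {
++ *		... dpaiop_* runtime calls using 'token' ...
++ *		dpaiop_close(mc_io, 0, token);
++ *	}
++ */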
++
++/**
++ * struct dpaiop_cfg - Structure representing DPAIOP configuration
++ * @aiop_id: AIOP ID
++ * @aiop_container_id: AIOP container ID
++ */
++struct dpaiop_cfg {
++ int aiop_id;
++ int aiop_container_id;
++};
++
++/**
++ * dpaiop_create() - Create the DPAIOP object.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @cfg: Configuration structure
++ * @token: Returned token; use in subsequent API calls
++ *
++ * Create the DPAIOP object, allocate required resources and
++ * perform required initialization.
++ *
++ * The object can be created either by declaring it in the
++ * DPL file, or by calling this function.
++ * This function returns a unique authentication token,
++ * associated with the specific object ID and the specific MC
++ * portal; this token must be used in all subsequent calls to
++ * this specific object. For objects that are created using the
++ * DPL file, call dpaiop_open function to get an authentication
++ * token first.
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpaiop_create(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ const struct dpaiop_cfg *cfg,
++ uint16_t *token);
++
++/**
++ * dpaiop_destroy() - Destroy the DPAIOP object and release all its resources.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPAIOP object
++ *
++ * Return: '0' on Success; error code otherwise.
++ */
++int dpaiop_destroy(struct fsl_mc_io *mc_io, uint32_t cmd_flags, uint16_t token);
++
++/**
++ * dpaiop_reset() - Reset the DPAIOP, returns the object to initial state.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPAIOP object
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpaiop_reset(struct fsl_mc_io *mc_io, uint32_t cmd_flags, uint16_t token);
++
++/**
++ * struct dpaiop_irq_cfg - IRQ configuration
++ * @addr: Address that must be written to signal a message-based interrupt
++ * @val:	Value to write to 'addr' to signal the interrupt
++ * @irq_num: A user defined number associated with this IRQ
++ */
++struct dpaiop_irq_cfg {
++ uint64_t addr;
++ uint32_t val;
++ int irq_num;
++};
++
++/**
++ * dpaiop_set_irq() - Set IRQ information for the DPAIOP to trigger an interrupt.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPAIOP object
++ * @irq_index: Identifies the interrupt index to configure
++ * @irq_cfg: IRQ configuration
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpaiop_set_irq(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ struct dpaiop_irq_cfg *irq_cfg);
++
++/**
++ * dpaiop_get_irq() - Get IRQ information from the DPAIOP.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPAIOP object
++ * @irq_index: The interrupt index to configure
++ * @type: Interrupt type: 0 represents message interrupt
++ * type (both irq_addr and irq_val are valid)
++ * @irq_cfg: IRQ attributes
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpaiop_get_irq(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ int *type,
++ struct dpaiop_irq_cfg *irq_cfg);
++
++/**
++ * dpaiop_set_irq_enable() - Set overall interrupt state.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPAIOP object
++ * @irq_index: The interrupt index to configure
++ * @en: Interrupt state - enable = 1, disable = 0
++ *
++ * Allows GPP software to control when interrupts are generated.
++ * Each interrupt can have up to 32 causes.  The enable/disable controls the
++ * overall interrupt state; if the interrupt is disabled, no cause will
++ * trigger an interrupt.
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpaiop_set_irq_enable(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ uint8_t en);
++
++/**
++ * dpaiop_get_irq_enable() - Get overall interrupt state
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPAIOP object
++ * @irq_index: The interrupt index to configure
++ * @en: Returned interrupt state - enable = 1, disable = 0
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpaiop_get_irq_enable(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ uint8_t *en);
++
++/**
++ * dpaiop_set_irq_mask() - Set interrupt mask.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPAIOP object
++ * @irq_index: The interrupt index to configure
++ * @mask: Event mask to trigger interrupt;
++ * each bit:
++ * 0 = ignore event
++ * 1 = consider event for asserting IRQ
++ *
++ * Every interrupt can have up to 32 causes and the interrupt model supports
++ * masking/unmasking each cause independently
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpaiop_set_irq_mask(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ uint32_t mask);
++
++/**
++ * dpaiop_get_irq_mask() - Get interrupt mask.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPAIOP object
++ * @irq_index: The interrupt index to configure
++ * @mask: Returned event mask to trigger interrupt
++ *
++ * Every interrupt can have up to 32 causes and the interrupt model supports
++ * masking/unmasking each cause independently
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpaiop_get_irq_mask(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ uint32_t *mask);
++
++/**
++ * dpaiop_get_irq_status() - Get the current status of any pending interrupts.
++ *
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPAIOP object
++ * @irq_index: The interrupt index to configure
++ * @status: Returned interrupts status - one bit per cause:
++ * 0 = no interrupt pending
++ * 1 = interrupt pending
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpaiop_get_irq_status(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ uint32_t *status);
++
++/**
++ * dpaiop_clear_irq_status() - Clear a pending interrupt's status
++ *
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPAIOP object
++ * @irq_index: The interrupt index to configure
++ * @status: Bits to clear (W1C) - one bit per cause:
++ * 0 = don't change
++ * 1 = clear status bit
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpaiop_clear_irq_status(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ uint32_t status);
++
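A usage sketch (not part of the patch) of how the DPAIOP IRQ calls above are typically chained: enable an interrupt index, read the pending causes, then acknowledge them. The mc_io handle, cmd_flags and token are assumed to come from an earlier dpaiop_open(); irq_index 0 is a placeholder.

    static int example_handle_dpaiop_irq(struct fsl_mc_io *mc_io,
                                         uint32_t cmd_flags, uint16_t token)
    {
        uint32_t status = 0;
        int err;

        /* irq_index 0 is assumed for illustration */
        err = dpaiop_set_irq_enable(mc_io, cmd_flags, token, 0, 1);
        if (err)
            return err;

        err = dpaiop_get_irq_status(mc_io, cmd_flags, token, 0, &status);
        if (err)
            return err;

        /* acknowledge (W1C) every cause that was reported */
        return dpaiop_clear_irq_status(mc_io, cmd_flags, token, 0, status);
    }
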
++/**
++ * struct dpaiop_attr - Structure representing DPAIOP attributes
++ * @id: AIOP ID
++ * @version: DPAIOP version
++ */
++struct dpaiop_attr {
++ int id;
++ /**
++ * struct version - Structure representing DPAIOP version
++ * @major: DPAIOP major version
++ * @minor: DPAIOP minor version
++ */
++ struct {
++ uint16_t major;
++ uint16_t minor;
++ } version;
++};
++
++/**
++ * dpaiop_get_attributes - Retrieve DPAIOP attributes.
++ *
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPAIOP object
++ * @attr: Returned object's attributes
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpaiop_get_attributes(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ struct dpaiop_attr *attr);
++
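As an illustration (an editorial addition, not patch content), reading the attributes defined above could look like the following; mc_io, cmd_flags and token are assumed from a prior dpaiop_open(), and <stdio.h> is assumed to be included.

    struct dpaiop_attr attr;
    int err = dpaiop_get_attributes(mc_io, cmd_flags, token, &attr);

    if (err == 0)
        printf("DPAIOP id %d, version %u.%u\n",
               attr.id, attr.version.major, attr.version.minor);
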
++/**
++ * struct dpaiop_load_cfg - AIOP load configuration
++ * @options: AIOP load options
++ * @img_iova: I/O virtual address of AIOP ELF image
++ * @img_size: Size of AIOP ELF image in memory (in bytes)
++ */
++struct dpaiop_load_cfg {
++ uint64_t options;
++ uint64_t img_iova;
++ uint32_t img_size;
++};
++
++/**
++ * dpaiop_load() - Load an image to the AIOP
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPAIOP object
++ * @cfg: AIOP load configuration
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpaiop_load(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ struct dpaiop_load_cfg *cfg);
++
++#define DPAIOP_RUN_OPT_DEBUG 0x0000000000000001ULL
++
++/**
++ * struct dpaiop_run_cfg - AIOP run configuration
++ * @cores_mask: Mask of AIOP cores to run (core 0 in most significant bit)
++ * @options: Execution options; use 'DPAIOP_RUN_OPT_<X>' values
++ * @args_iova: I/O virtual address of AIOP arguments
++ * @args_size: Size of AIOP arguments in memory (in bytes)
++ */
++struct dpaiop_run_cfg {
++ uint64_t cores_mask;
++ uint64_t options;
++ uint64_t args_iova;
++ uint32_t args_size;
++};
++
++/**
++ * dpaiop_run() - Start AIOP execution
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPAIOP object
++ * @cfg: AIOP run configuration
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpaiop_run(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ const struct dpaiop_run_cfg *cfg);
++
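A hedged sketch of the load-then-run sequence built from the two configuration structures above. The IOVAs, sizes and core mask are placeholders supplied by the caller; mc_io, cmd_flags and token are assumed from dpaiop_open().

    static int example_load_and_run(struct fsl_mc_io *mc_io, uint32_t cmd_flags,
                                    uint16_t token,
                                    uint64_t img_iova, uint32_t img_size,
                                    uint64_t args_iova, uint32_t args_size)
    {
        struct dpaiop_load_cfg load_cfg = {
            .options  = 0,
            .img_iova = img_iova,    /* IOVA of the AIOP ELF image */
            .img_size = img_size,
        };
        struct dpaiop_run_cfg run_cfg = {
            .cores_mask = 1ULL << 63,           /* placeholder: core 0 (MSB per the doc above) */
            .options    = DPAIOP_RUN_OPT_DEBUG, /* optional debug flag defined above */
            .args_iova  = args_iova,
            .args_size  = args_size,
        };
        int err;

        err = dpaiop_load(mc_io, cmd_flags, token, &load_cfg);
        if (err)
            return err;
        return dpaiop_run(mc_io, cmd_flags, token, &run_cfg);
    }
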
++/**
++ * struct dpaiop_sl_version - AIOP SL (Service Layer) version
++ * @major: AIOP SL major version number
++ * @minor: AIOP SL minor version number
++ * @revision: AIOP SL revision number
++ */
++struct dpaiop_sl_version {
++ uint32_t major;
++ uint32_t minor;
++ uint32_t revision;
++};
++
++/**
++ * dpaiop_get_sl_version() - Get AIOP SL (Service Layer) version
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPAIOP object
++ * @version: AIOP SL version number
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpaiop_get_sl_version(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ struct dpaiop_sl_version *version);
++
++/**
++ * AIOP states
++ *
++ * AIOP internal states; these can be retrieved by calling the dpaiop_get_state() routine
++ */
++
++/**
++ * AIOP reset successfully completed.
++ */
++#define DPAIOP_STATE_RESET_DONE 0x00000000
++/**
++ * AIOP reset is ongoing.
++ */
++#define DPAIOP_STATE_RESET_ONGOING 0x00000001
++
++/**
++ * AIOP image loading successfully completed.
++ */
++#define DPAIOP_STATE_LOAD_DONE 0x00000002
++/**
++ * AIOP image loading is ongoing.
++ */
++#define DPAIOP_STATE_LOAD_ONGIONG 0x00000004
++/**
++ * AIOP image loading completed with error.
++ */
++#define DPAIOP_STATE_LOAD_ERROR 0x00000008
++
++/**
++ * Boot process of AIOP cores is ongoing.
++ */
++#define DPAIOP_STATE_BOOT_ONGOING 0x00000010
++/**
++ * Boot process of AIOP cores completed with an error.
++ */
++#define DPAIOP_STATE_BOOT_ERROR 0x00000020
++/**
++ * AIOP cores are functional and running
++ */
++#define DPAIOP_STATE_RUNNING 0x00000040
++/** @} */
++
++/**
++ * dpaiop_get_state() - Get AIOP state
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPAIOP object
++ * @state: AIOP state
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpaiop_get_state(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint32_t *state);
++
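For illustration, the state flags above are bit-tested against the value returned by dpaiop_get_state(); this sketch uses only the declarations in this header and assumes mc_io, cmd_flags and token from dpaiop_open().

    /* Returns 1 when the AIOP reports itself running, 0 otherwise (or on error). */
    static int example_aiop_is_running(struct fsl_mc_io *mc_io,
                                       uint32_t cmd_flags, uint16_t token)
    {
        uint32_t state = 0;

        if (dpaiop_get_state(mc_io, cmd_flags, token, &state))
            return 0;
        return (state & DPAIOP_STATE_RUNNING) ? 1 : 0;
    }
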
++/**
++ * dpaiop_set_time_of_day() - Set AIOP internal time-of-day
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPAIOP object
++ * @time_of_day: Current number of milliseconds since the Epoch
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpaiop_set_time_of_day(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint64_t time_of_day);
++
++/**
++ * dpaiop_get_time_of_day() - Get AIOP internal time-of-day
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPAIOP object
++ * @time_of_day: Current number of milliseconds since the Epoch
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpaiop_get_time_of_day(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint64_t *time_of_day);
++
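A small sketch, assuming a Linux host with clock_gettime() available, of seeding the AIOP clock with the milliseconds-since-Epoch value the two calls above exchange; it is an illustration, not part of the patch.

    #include <time.h>

    static int example_sync_aiop_clock(struct fsl_mc_io *mc_io,
                                       uint32_t cmd_flags, uint16_t token)
    {
        struct timespec ts;
        uint64_t ms;

        if (clock_gettime(CLOCK_REALTIME, &ts))
            return -1;
        ms = (uint64_t)ts.tv_sec * 1000ULL + (uint64_t)ts.tv_nsec / 1000000ULL;
        return dpaiop_set_time_of_day(mc_io, cmd_flags, token, ms);
    }
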
++#endif /* __FSL_DPAIOP_H */
+diff --git a/drivers/net/dpaa2/mc/fsl_dpaiop_cmd.h b/drivers/net/dpaa2/mc/fsl_dpaiop_cmd.h
+new file mode 100644
+index 0000000..5b77bb8
+--- /dev/null
++++ b/drivers/net/dpaa2/mc/fsl_dpaiop_cmd.h
+@@ -0,0 +1,190 @@
++/* Copyright 2013-2015 Freescale Semiconductor Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of the above-listed copyright holders nor the
++ * names of any contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
++ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
++ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
++ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
++ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
++ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
++ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
++ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
++ * POSSIBILITY OF SUCH DAMAGE.
++ */
++#ifndef _FSL_DPAIOP_CMD_H
++#define _FSL_DPAIOP_CMD_H
++
++/* DPAIOP Version */
++#define DPAIOP_VER_MAJOR 1
++#define DPAIOP_VER_MINOR 2
++
++/* Command IDs */
++#define DPAIOP_CMDID_CLOSE 0x800
++#define DPAIOP_CMDID_OPEN 0x80a
++#define DPAIOP_CMDID_CREATE 0x90a
++#define DPAIOP_CMDID_DESTROY 0x900
++
++#define DPAIOP_CMDID_GET_ATTR 0x004
++#define DPAIOP_CMDID_RESET 0x005
++
++#define DPAIOP_CMDID_SET_IRQ 0x010
++#define DPAIOP_CMDID_GET_IRQ 0x011
++#define DPAIOP_CMDID_SET_IRQ_ENABLE 0x012
++#define DPAIOP_CMDID_GET_IRQ_ENABLE 0x013
++#define DPAIOP_CMDID_SET_IRQ_MASK 0x014
++#define DPAIOP_CMDID_GET_IRQ_MASK 0x015
++#define DPAIOP_CMDID_GET_IRQ_STATUS 0x016
++#define DPAIOP_CMDID_CLEAR_IRQ_STATUS 0x017
++
++#define DPAIOP_CMDID_LOAD 0x280
++#define DPAIOP_CMDID_RUN 0x281
++#define DPAIOP_CMDID_GET_SL_VERSION 0x282
++#define DPAIOP_CMDID_GET_STATE 0x283
++#define DPAIOP_CMDID_SET_TIME_OF_DAY 0x284
++#define DPAIOP_CMDID_GET_TIME_OF_DAY 0x285
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPAIOP_CMD_OPEN(cmd, dpaiop_id) \
++ MC_CMD_OP(cmd, 0, 0, 32, int, dpaiop_id)
++
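For orientation, a hedged sketch of how command-encoding macros such as DPAIOP_CMD_OPEN are consumed by the matching dpaiop.c added elsewhere in this patch. The helpers struct mc_command, mc_encode_cmd_header(), mc_send_command() and MC_CMD_HDR_READ_TOKEN() follow the usual MC-portal pattern and are assumptions here, not definitions from this header.

    /* Sketch only: the mc_command helpers below are assumed, not defined here. */
    int dpaiop_open(struct fsl_mc_io *mc_io, uint32_t cmd_flags,
                    int dpaiop_id, uint16_t *token)
    {
        struct mc_command cmd = { 0 };
        int err;

        /* build the command header, then fill the payload via the macro */
        cmd.header = mc_encode_cmd_header(DPAIOP_CMDID_OPEN, cmd_flags, 0);
        DPAIOP_CMD_OPEN(cmd, dpaiop_id);

        err = mc_send_command(mc_io, &cmd);
        if (err)
            return err;

        *token = MC_CMD_HDR_READ_TOKEN(cmd.header);
        return 0;
    }
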
++/* cmd, param, offset, width, type, arg_name */
++#define DPAIOP_CMD_CREATE(cmd, cfg) \
++do { \
++ MC_CMD_OP(cmd, 0, 0, 32, int, cfg->aiop_id);\
++ MC_CMD_OP(cmd, 0, 32, 32, int, cfg->aiop_container_id);\
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPAIOP_CMD_SET_IRQ(cmd, irq_index, irq_cfg) \
++do { \
++ MC_CMD_OP(cmd, 0, 0, 8, uint8_t, irq_index);\
++ MC_CMD_OP(cmd, 0, 32, 32, uint32_t, irq_cfg->val);\
++ MC_CMD_OP(cmd, 1, 0, 64, uint64_t, irq_cfg->addr); \
++ MC_CMD_OP(cmd, 2, 0, 32, int, irq_cfg->irq_num); \
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPAIOP_CMD_GET_IRQ(cmd, irq_index) \
++ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPAIOP_RSP_GET_IRQ(cmd, type, irq_cfg) \
++do { \
++ MC_RSP_OP(cmd, 0, 0, 32, uint32_t, irq_cfg->val); \
++ MC_RSP_OP(cmd, 1, 0, 64, uint64_t, irq_cfg->addr); \
++ MC_RSP_OP(cmd, 2, 0, 32, int, irq_cfg->irq_num); \
++ MC_RSP_OP(cmd, 2, 32, 32, int, type); \
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPAIOP_CMD_SET_IRQ_ENABLE(cmd, irq_index, en) \
++do { \
++ MC_CMD_OP(cmd, 0, 0, 8, uint8_t, en); \
++ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPAIOP_CMD_GET_IRQ_ENABLE(cmd, irq_index) \
++ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPAIOP_RSP_GET_IRQ_ENABLE(cmd, en) \
++ MC_RSP_OP(cmd, 0, 0, 8, uint8_t, en)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPAIOP_CMD_SET_IRQ_MASK(cmd, irq_index, mask) \
++do { \
++ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, mask);\
++ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPAIOP_CMD_GET_IRQ_MASK(cmd, irq_index) \
++ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPAIOP_RSP_GET_IRQ_MASK(cmd, mask) \
++ MC_RSP_OP(cmd, 0, 0, 32, uint32_t, mask)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPAIOP_CMD_GET_IRQ_STATUS(cmd, irq_index, status) \
++do { \
++ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, status);\
++ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPAIOP_RSP_GET_IRQ_STATUS(cmd, status) \
++ MC_RSP_OP(cmd, 0, 0, 32, uint32_t, status)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPAIOP_CMD_CLEAR_IRQ_STATUS(cmd, irq_index, status) \
++do { \
++ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, status); \
++ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPAIOP_RSP_GET_ATTRIBUTES(cmd, attr) \
++do { \
++ MC_RSP_OP(cmd, 0, 0, 32, int, attr->id);\
++ MC_RSP_OP(cmd, 1, 0, 16, uint16_t, attr->version.major);\
++ MC_RSP_OP(cmd, 1, 16, 16, uint16_t, attr->version.minor);\
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPAIOP_CMD_LOAD(cmd, cfg) \
++do { \
++ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, cfg->img_size); \
++ MC_CMD_OP(cmd, 1, 0, 64, uint64_t, cfg->img_iova); \
++ MC_CMD_OP(cmd, 2, 0, 64, uint64_t, cfg->options); \
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPAIOP_CMD_RUN(cmd, cfg) \
++do { \
++ MC_CMD_OP(cmd, 0, 32, 32, uint32_t, cfg->args_size); \
++ MC_CMD_OP(cmd, 1, 0, 64, uint64_t, cfg->cores_mask); \
++ MC_CMD_OP(cmd, 2, 0, 64, uint64_t, cfg->options); \
++ MC_CMD_OP(cmd, 3, 0, 64, uint64_t, cfg->args_iova); \
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPAIOP_RSP_GET_SL_VERSION(cmd, version) \
++do { \
++ MC_RSP_OP(cmd, 0, 0, 32, uint32_t, version->major);\
++ MC_RSP_OP(cmd, 0, 32, 32, uint32_t, version->minor);\
++ MC_RSP_OP(cmd, 1, 0, 32, uint32_t, version->revision);\
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPAIOP_RSP_GET_STATE(cmd, state) \
++ MC_RSP_OP(cmd, 0, 0, 32, uint32_t, state)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPAIOP_CMD_SET_TIME_OF_DAY(cmd, time_of_day) \
++ MC_CMD_OP(cmd, 0, 0, 64, uint64_t, time_of_day)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPAIOP_RSP_GET_TIME_OF_DAY(cmd, time_of_day) \
++ MC_RSP_OP(cmd, 0, 0, 64, uint64_t, time_of_day)
++
++#endif /* _FSL_DPAIOP_CMD_H */
+diff --git a/drivers/net/dpaa2/mc/fsl_dpbp.h b/drivers/net/dpaa2/mc/fsl_dpbp.h
+new file mode 100644
+index 0000000..9856bb8
+--- /dev/null
++++ b/drivers/net/dpaa2/mc/fsl_dpbp.h
+@@ -0,0 +1,438 @@
++/* Copyright 2013-2015 Freescale Semiconductor Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of the above-listed copyright holders nor the
++ * names of any contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
++ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
++ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
++ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
++ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
++ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
++ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
++ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
++ * POSSIBILITY OF SUCH DAMAGE.
++ */
++#ifndef __FSL_DPBP_H
++#define __FSL_DPBP_H
++
++/* Data Path Buffer Pool API
++ * Contains initialization APIs and runtime control APIs for DPBP
++ */
++
++struct fsl_mc_io;
++
++/**
++ * dpbp_open() - Open a control session for the specified object.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @dpbp_id: DPBP unique ID
++ * @token: Returned token; use in subsequent API calls
++ *
++ * This function can be used to open a control session for an
++ * already created object; an object may have been declared in
++ * the DPL or by calling the dpbp_create function.
++ * This function returns a unique authentication token,
++ * associated with the specific object ID and the specific MC
++ * portal; this token must be used in all subsequent commands for
++ * this specific object
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpbp_open(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ int dpbp_id,
++ uint16_t *token);
++
++/**
++ * dpbp_close() - Close the control session of the object
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPBP object
++ *
++ * After this function is called, no further operations are
++ * allowed on the object without opening a new control session.
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpbp_close(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token);
++
++/**
++ * struct dpbp_cfg - Structure representing DPBP configuration
++ * @options: placeholder
++ */
++struct dpbp_cfg {
++ uint32_t options;
++};
++
++/**
++ * dpbp_create() - Create the DPBP object.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @cfg: Configuration structure
++ * @token: Returned token; use in subsequent API calls
++ *
++ * Create the DPBP object, allocate required resources and
++ * perform required initialization.
++ *
++ * The object can be created either by declaring it in the
++ * DPL file, or by calling this function.
++ * This function returns a unique authentication token,
++ * associated with the specific object ID and the specific MC
++ * portal; this token must be used in all subsequent calls to
++ * this specific object. For objects that are created using the
++ * DPL file, call dpbp_open function to get an authentication
++ * token first.
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpbp_create(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ const struct dpbp_cfg *cfg,
++ uint16_t *token);
++
++/**
++ * dpbp_destroy() - Destroy the DPBP object and release all its resources.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPBP object
++ *
++ * Return: '0' on Success; error code otherwise.
++ */
++int dpbp_destroy(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token);
++
++/**
++ * dpbp_enable() - Enable the DPBP.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPBP object
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpbp_enable(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token);
++
++/**
++ * dpbp_disable() - Disable the DPBP.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPBP object
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpbp_disable(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token);
++
++/**
++ * dpbp_is_enabled() - Check if the DPBP is enabled.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPBP object
++ * @en: Returns '1' if object is enabled; '0' otherwise
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpbp_is_enabled(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ int *en);
++
++/**
++ * dpbp_reset() - Reset the DPBP, returns the object to initial state.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPBP object
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpbp_reset(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token);
++
++/**
++ * struct dpbp_irq_cfg - IRQ configuration
++ * @addr: Address that must be written to signal a message-based interrupt
++ * @val: Value to write into irq_addr address
++ * @irq_num: A user defined number associated with this IRQ
++ */
++struct dpbp_irq_cfg {
++ uint64_t addr;
++ uint32_t val;
++ int irq_num;
++};
++
++/**
++ * dpbp_set_irq() - Set IRQ information for the DPBP to trigger an interrupt.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPBP object
++ * @irq_index: Identifies the interrupt index to configure
++ * @irq_cfg: IRQ configuration
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpbp_set_irq(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ struct dpbp_irq_cfg *irq_cfg);
++
++/**
++ * dpbp_get_irq() - Get IRQ information from the DPBP.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPBP object
++ * @irq_index: The interrupt index to configure
++ * @type: Interrupt type: 0 represents message interrupt
++ * type (both irq_addr and irq_val are valid)
++ * @irq_cfg: IRQ attributes
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpbp_get_irq(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ int *type,
++ struct dpbp_irq_cfg *irq_cfg);
++
++/**
++ * dpbp_set_irq_enable() - Set overall interrupt state.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPBP object
++ * @irq_index: The interrupt index to configure
++ * @en: Interrupt state - enable = 1, disable = 0
++ *
++ * Allows GPP software to control when interrupts are generated.
++ * Each interrupt can have up to 32 causes. The enable/disable controls the
++ * overall interrupt state. If the interrupt is disabled, no cause will
++ * trigger an interrupt.
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpbp_set_irq_enable(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ uint8_t en);
++
++/**
++ * dpbp_get_irq_enable() - Get overall interrupt state
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPBP object
++ * @irq_index: The interrupt index to configure
++ * @en: Returned interrupt state - enable = 1, disable = 0
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpbp_get_irq_enable(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ uint8_t *en);
++
++/**
++ * dpbp_set_irq_mask() - Set interrupt mask.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPBP object
++ * @irq_index: The interrupt index to configure
++ * @mask: Event mask to trigger interrupt;
++ * each bit:
++ * 0 = ignore event
++ * 1 = consider event for asserting IRQ
++ *
++ * Every interrupt can have up to 32 causes and the interrupt model supports
++ * masking/unmasking each cause independently
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpbp_set_irq_mask(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ uint32_t mask);
++
++/**
++ * dpbp_get_irq_mask() - Get interrupt mask.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPBP object
++ * @irq_index: The interrupt index to configure
++ * @mask: Returned event mask to trigger interrupt
++ *
++ * Every interrupt can have up to 32 causes and the interrupt model supports
++ * masking/unmasking each cause independently
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpbp_get_irq_mask(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ uint32_t *mask);
++
++/**
++ * dpbp_get_irq_status() - Get the current status of any pending interrupts.
++ *
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPBP object
++ * @irq_index: The interrupt index to configure
++ * @status: Returned interrupts status - one bit per cause:
++ * 0 = no interrupt pending
++ * 1 = interrupt pending
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpbp_get_irq_status(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ uint32_t *status);
++
++/**
++ * dpbp_clear_irq_status() - Clear a pending interrupt's status
++ *
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPBP object
++ * @irq_index: The interrupt index to configure
++ * @status: Bits to clear (W1C) - one bit per cause:
++ * 0 = don't change
++ * 1 = clear status bit
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpbp_clear_irq_status(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ uint32_t status);
++
++/**
++ * struct dpbp_attr - Structure representing DPBP attributes
++ * @id: DPBP object ID
++ * @version: DPBP version
++ * @bpid: Hardware buffer pool ID; should be used as an argument in
++ * acquire/release operations on buffers
++ */
++struct dpbp_attr {
++ int id;
++ /**
++ * struct version - Structure representing DPBP version
++ * @major: DPBP major version
++ * @minor: DPBP minor version
++ */
++ struct {
++ uint16_t major;
++ uint16_t minor;
++ } version;
++ uint16_t bpid;
++};
++
++/**
++ * dpbp_get_attributes - Retrieve DPBP attributes.
++ *
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPBP object
++ * @attr: Returned object's attributes
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpbp_get_attributes(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ struct dpbp_attr *attr);
++
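As a usage sketch (not part of the patch), a typical DPBP bring-up chains the calls declared above: open, enable, then read the attributes to learn the hardware bpid used for buffer acquire/release.

    static int example_dpbp_bringup(struct fsl_mc_io *mc_io, uint32_t cmd_flags,
                                    int dpbp_id, uint16_t *token, uint16_t *bpid)
    {
        struct dpbp_attr attr;
        int err;

        err = dpbp_open(mc_io, cmd_flags, dpbp_id, token);
        if (err)
            return err;

        err = dpbp_enable(mc_io, cmd_flags, *token);
        if (err)
            goto close;

        err = dpbp_get_attributes(mc_io, cmd_flags, *token, &attr);
        if (err)
            goto disable;

        *bpid = attr.bpid;   /* hardware buffer pool ID for acquire/release */
        return 0;

    disable:
        dpbp_disable(mc_io, cmd_flags, *token);
    close:
        dpbp_close(mc_io, cmd_flags, *token);
        return err;
    }
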
++/**
++ * DPBP notifications options
++ */
++
++/**
++ * BPSCN write will attempt to allocate into a cache (coherent write)
++ */
++#define DPBP_NOTIF_OPT_COHERENT_WRITE 0x00000001
++
++/**
++ * struct dpbp_notification_cfg - Structure representing DPBP notifications
++ * towards software
++ * @depletion_entry: below this threshold the pool is "depleted";
++ * set it to '0' to disable it
++ * @depletion_exit: greater than or equal to this threshold the pool exits its
++ * "depleted" state
++ * @surplus_entry: above this threshold the pool is in "surplus" state;
++ * set it to '0' to disable it
++ * @surplus_exit: less than or equal to this threshold the pool exits its
++ * "surplus" state
++ * @message_iova: MUST be given if either 'depletion_entry' or 'surplus_entry'
++ * is not '0' (enable); I/O virtual address (must be in DMA-able memory),
++ * must be 16B aligned.
++ * @message_ctx: The context that will be part of the BPSCN message and will
++ * be written to 'message_iova'
++ * @options: Mask of available options; use 'DPBP_NOTIF_OPT_<X>' values
++ */
++struct dpbp_notification_cfg {
++ uint32_t depletion_entry;
++ uint32_t depletion_exit;
++ uint32_t surplus_entry;
++ uint32_t surplus_exit;
++ uint64_t message_iova;
++ uint64_t message_ctx;
++ uint16_t options;
++};
++
++/**
++ * dpbp_set_notifications() - Set notifications towards software
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPBP object
++ * @cfg: notifications configuration
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpbp_set_notifications(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ struct dpbp_notification_cfg *cfg);
++
++/**
++ * dpbp_get_notifications() - Get the notifications configuration
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPBP object
++ * @cfg: notifications configuration
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpbp_get_notifications(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ struct dpbp_notification_cfg *cfg);
++
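A hedged example of programming the depletion notification described above; the thresholds are placeholders, msg_iova stands for a caller-provided, 16-byte-aligned DMA-able address, and mc_io, cmd_flags and token come from dpbp_open().

    struct dpbp_notification_cfg ncfg = {
        .depletion_entry = 32,        /* placeholder: pool "depleted" below 32 buffers */
        .depletion_exit  = 64,        /* placeholder: leaves "depleted" at 64 or more */
        .surplus_entry   = 0,         /* 0 = surplus notification disabled */
        .surplus_exit    = 0,
        .message_iova    = msg_iova,  /* assumed 16B-aligned, DMA-able address */
        .message_ctx     = 0,         /* opaque context echoed in the BPSCN message */
        .options         = DPBP_NOTIF_OPT_COHERENT_WRITE,
    };
    int err = dpbp_set_notifications(mc_io, cmd_flags, token, &ncfg);
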
++#endif /* __FSL_DPBP_H */
+diff --git a/drivers/net/dpaa2/mc/fsl_dpbp_cmd.h b/drivers/net/dpaa2/mc/fsl_dpbp_cmd.h
+new file mode 100644
+index 0000000..71ad96a
+--- /dev/null
++++ b/drivers/net/dpaa2/mc/fsl_dpbp_cmd.h
+@@ -0,0 +1,172 @@
++/* Copyright 2013-2015 Freescale Semiconductor Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of the above-listed copyright holders nor the
++ * names of any contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
++ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
++ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
++ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
++ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
++ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
++ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
++ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
++ * POSSIBILITY OF SUCH DAMAGE.
++ */
++#ifndef _FSL_DPBP_CMD_H
++#define _FSL_DPBP_CMD_H
++
++/* DPBP Version */
++#define DPBP_VER_MAJOR 2
++#define DPBP_VER_MINOR 2
++
++/* Command IDs */
++#define DPBP_CMDID_CLOSE 0x800
++#define DPBP_CMDID_OPEN 0x804
++#define DPBP_CMDID_CREATE 0x904
++#define DPBP_CMDID_DESTROY 0x900
++
++#define DPBP_CMDID_ENABLE 0x002
++#define DPBP_CMDID_DISABLE 0x003
++#define DPBP_CMDID_GET_ATTR 0x004
++#define DPBP_CMDID_RESET 0x005
++#define DPBP_CMDID_IS_ENABLED 0x006
++
++#define DPBP_CMDID_SET_IRQ 0x010
++#define DPBP_CMDID_GET_IRQ 0x011
++#define DPBP_CMDID_SET_IRQ_ENABLE 0x012
++#define DPBP_CMDID_GET_IRQ_ENABLE 0x013
++#define DPBP_CMDID_SET_IRQ_MASK 0x014
++#define DPBP_CMDID_GET_IRQ_MASK 0x015
++#define DPBP_CMDID_GET_IRQ_STATUS 0x016
++#define DPBP_CMDID_CLEAR_IRQ_STATUS 0x017
++
++#define DPBP_CMDID_SET_NOTIFICATIONS 0x01b0
++#define DPBP_CMDID_GET_NOTIFICATIONS 0x01b1
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPBP_CMD_OPEN(cmd, dpbp_id) \
++ MC_CMD_OP(cmd, 0, 0, 32, int, dpbp_id)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPBP_RSP_IS_ENABLED(cmd, en) \
++ MC_RSP_OP(cmd, 0, 0, 1, int, en)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPBP_CMD_SET_IRQ(cmd, irq_index, irq_cfg) \
++do { \
++ MC_CMD_OP(cmd, 0, 0, 8, uint8_t, irq_index);\
++ MC_CMD_OP(cmd, 0, 32, 32, uint32_t, irq_cfg->val);\
++ MC_CMD_OP(cmd, 1, 0, 64, uint64_t, irq_cfg->addr); \
++ MC_CMD_OP(cmd, 2, 0, 32, int, irq_cfg->irq_num); \
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPBP_CMD_GET_IRQ(cmd, irq_index) \
++ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPBP_RSP_GET_IRQ(cmd, type, irq_cfg) \
++do { \
++ MC_RSP_OP(cmd, 0, 0, 32, uint32_t, irq_cfg->val); \
++ MC_RSP_OP(cmd, 1, 0, 64, uint64_t, irq_cfg->addr); \
++ MC_RSP_OP(cmd, 2, 0, 32, int, irq_cfg->irq_num); \
++ MC_RSP_OP(cmd, 2, 32, 32, int, type); \
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPBP_CMD_SET_IRQ_ENABLE(cmd, irq_index, en) \
++do { \
++ MC_CMD_OP(cmd, 0, 0, 8, uint8_t, en); \
++ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPBP_CMD_GET_IRQ_ENABLE(cmd, irq_index) \
++ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPBP_RSP_GET_IRQ_ENABLE(cmd, en) \
++ MC_RSP_OP(cmd, 0, 0, 8, uint8_t, en)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPBP_CMD_SET_IRQ_MASK(cmd, irq_index, mask) \
++do { \
++ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, mask);\
++ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPBP_CMD_GET_IRQ_MASK(cmd, irq_index) \
++ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPBP_RSP_GET_IRQ_MASK(cmd, mask) \
++ MC_RSP_OP(cmd, 0, 0, 32, uint32_t, mask)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPBP_CMD_GET_IRQ_STATUS(cmd, irq_index, status) \
++do { \
++ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, status);\
++ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\
++} while (0)
++/* cmd, param, offset, width, type, arg_name */
++#define DPBP_RSP_GET_IRQ_STATUS(cmd, status) \
++ MC_RSP_OP(cmd, 0, 0, 32, uint32_t, status)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPBP_CMD_CLEAR_IRQ_STATUS(cmd, irq_index, status) \
++do { \
++ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, status); \
++ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPBP_RSP_GET_ATTRIBUTES(cmd, attr) \
++do { \
++ MC_RSP_OP(cmd, 0, 16, 16, uint16_t, attr->bpid); \
++ MC_RSP_OP(cmd, 0, 32, 32, int, attr->id);\
++ MC_RSP_OP(cmd, 1, 0, 16, uint16_t, attr->version.major);\
++ MC_RSP_OP(cmd, 1, 16, 16, uint16_t, attr->version.minor);\
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPBP_CMD_SET_NOTIFICATIONS(cmd, cfg) \
++do { \
++ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, cfg->depletion_entry); \
++ MC_CMD_OP(cmd, 0, 32, 32, uint32_t, cfg->depletion_exit);\
++ MC_CMD_OP(cmd, 1, 0, 32, uint32_t, cfg->surplus_entry);\
++ MC_CMD_OP(cmd, 1, 32, 32, uint32_t, cfg->surplus_exit);\
++ MC_CMD_OP(cmd, 2, 0, 16, uint16_t, cfg->options);\
++ MC_CMD_OP(cmd, 3, 0, 64, uint64_t, cfg->message_ctx);\
++ MC_CMD_OP(cmd, 4, 0, 64, uint64_t, cfg->message_iova);\
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPBP_CMD_GET_NOTIFICATIONS(cmd, cfg) \
++do { \
++ MC_RSP_OP(cmd, 0, 0, 32, uint32_t, cfg->depletion_entry); \
++ MC_RSP_OP(cmd, 0, 32, 32, uint32_t, cfg->depletion_exit);\
++ MC_RSP_OP(cmd, 1, 0, 32, uint32_t, cfg->surplus_entry);\
++ MC_RSP_OP(cmd, 1, 32, 32, uint32_t, cfg->surplus_exit);\
++ MC_RSP_OP(cmd, 2, 0, 16, uint16_t, cfg->options);\
++ MC_RSP_OP(cmd, 3, 0, 64, uint64_t, cfg->message_ctx);\
++ MC_RSP_OP(cmd, 4, 0, 64, uint64_t, cfg->message_iova);\
++} while (0)
++#endif /* _FSL_DPBP_CMD_H */
+diff --git a/drivers/net/dpaa2/mc/fsl_dpci.h b/drivers/net/dpaa2/mc/fsl_dpci.h
+new file mode 100644
+index 0000000..d885935
+--- /dev/null
++++ b/drivers/net/dpaa2/mc/fsl_dpci.h
+@@ -0,0 +1,594 @@
++/* Copyright 2013-2015 Freescale Semiconductor Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of the above-listed copyright holders nor the
++ * names of any contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
++ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
++ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
++ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
++ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
++ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
++ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
++ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
++ * POSSIBILITY OF SUCH DAMAGE.
++ */
++#ifndef __FSL_DPCI_H
++#define __FSL_DPCI_H
++
++/* Data Path Communication Interface API
++ * Contains initialization APIs and runtime control APIs for DPCI
++ */
++
++struct fsl_mc_io;
++
++/** General DPCI macros */
++
++/**
++ * Maximum number of Tx/Rx priorities per DPCI object
++ */
++#define DPCI_PRIO_NUM 2
++
++/**
++ * Indicates an invalid frame queue
++ */
++#define DPCI_FQID_NOT_VALID (uint32_t)(-1)
++
++/**
++ * All queues considered; see dpci_set_rx_queue()
++ */
++#define DPCI_ALL_QUEUES (uint8_t)(-1)
++
++/**
++ * dpci_open() - Open a control session for the specified object
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @dpci_id: DPCI unique ID
++ * @token: Returned token; use in subsequent API calls
++ *
++ * This function can be used to open a control session for an
++ * already created object; an object may have been declared in
++ * the DPL or by calling the dpci_create() function.
++ * This function returns a unique authentication token,
++ * associated with the specific object ID and the specific MC
++ * portal; this token must be used in all subsequent commands for
++ * this specific object.
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpci_open(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ int dpci_id,
++ uint16_t *token);
++
++/**
++ * dpci_close() - Close the control session of the object
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPCI object
++ *
++ * After this function is called, no further operations are
++ * allowed on the object without opening a new control session.
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpci_close(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token);
++
++/**
++ * struct dpci_cfg - Structure representing DPCI configuration
++ * @num_of_priorities: Number of receive priorities (queues) for the DPCI;
++ * note that the number of transmit priorities (queues)
++ * is determined by the number of receive priorities of
++ * the peer DPCI object
++ */
++struct dpci_cfg {
++ uint8_t num_of_priorities;
++};
++
++/**
++ * dpci_create() - Create the DPCI object.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @cfg: Configuration structure
++ * @token: Returned token; use in subsequent API calls
++ *
++ * Create the DPCI object, allocate required resources and perform required
++ * initialization.
++ *
++ * The object can be created either by declaring it in the
++ * DPL file, or by calling this function.
++ *
++ * This function returns a unique authentication token,
++ * associated with the specific object ID and the specific MC
++ * portal; this token must be used in all subsequent calls to
++ * this specific object. For objects that are created using the
++ * DPL file, call dpci_open() function to get an authentication
++ * token first.
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpci_create(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ const struct dpci_cfg *cfg,
++ uint16_t *token);
++
++/**
++ * dpci_destroy() - Destroy the DPCI object and release all its resources.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPCI object
++ *
++ * Return: '0' on Success; error code otherwise.
++ */
++int dpci_destroy(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token);
++
++/**
++ * dpci_enable() - Enable the DPCI, allow sending and receiving frames.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPCI object
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpci_enable(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token);
++
++/**
++ * dpci_disable() - Disable the DPCI, stop sending and receiving frames.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPCI object
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpci_disable(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token);
++
++/**
++ * dpci_is_enabled() - Check if the DPCI is enabled.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPCI object
++ * @en: Returns '1' if object is enabled; '0' otherwise
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpci_is_enabled(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ int *en);
++
++/**
++ * dpci_reset() - Reset the DPCI, returns the object to initial state.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPCI object
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpci_reset(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token);
++
++/** DPCI IRQ Index and Events */
++
++/**
++ * IRQ index
++ */
++#define DPCI_IRQ_INDEX 0
++
++/**
++ * IRQ event - indicates a change in link state
++ */
++#define DPCI_IRQ_EVENT_LINK_CHANGED 0x00000001
++/**
++ * IRQ event - indicates a connection event
++ */
++#define DPCI_IRQ_EVENT_CONNECTED 0x00000002
++/**
++ * IRQ event - indicates a disconnection event
++ */
++#define DPCI_IRQ_EVENT_DISCONNECTED 0x00000004
++
++/**
++ * struct dpci_irq_cfg - IRQ configuration
++ * @addr: Address that must be written to signal a message-based interrupt
++ * @val: Value to write into irq_addr address
++ * @irq_num: A user defined number associated with this IRQ
++ */
++struct dpci_irq_cfg {
++ uint64_t addr;
++ uint32_t val;
++ int irq_num;
++};
++
++/**
++ * dpci_set_irq() - Set IRQ information for the DPCI to trigger an interrupt.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPCI object
++ * @irq_index: Identifies the interrupt index to configure
++ * @irq_cfg: IRQ configuration
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpci_set_irq(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ struct dpci_irq_cfg *irq_cfg);
++
++/**
++ * dpci_get_irq() - Get IRQ information from the DPCI.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPCI object
++ * @irq_index: The interrupt index to configure
++ * @type: Interrupt type: 0 represents message interrupt
++ * type (both irq_addr and irq_val are valid)
++ * @irq_cfg: IRQ attributes
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpci_get_irq(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ int *type,
++ struct dpci_irq_cfg *irq_cfg);
++
++/**
++ * dpci_set_irq_enable() - Set overall interrupt state.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPCI object
++ * @irq_index: The interrupt index to configure
++ * @en: Interrupt state - enable = 1, disable = 0
++ *
++ * Allows GPP software to control when interrupts are generated.
++ * Each interrupt can have up to 32 causes. The enable/disable controls the
++ * overall interrupt state. If the interrupt is disabled, no cause will
++ * trigger an interrupt.
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpci_set_irq_enable(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ uint8_t en);
++
++/**
++ * dpci_get_irq_enable() - Get overall interrupt state.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPCI object
++ * @irq_index: The interrupt index to configure
++ * @en: Returned interrupt state - enable = 1, disable = 0
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpci_get_irq_enable(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ uint8_t *en);
++
++/**
++ * dpci_set_irq_mask() - Set interrupt mask.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPCI object
++ * @irq_index: The interrupt index to configure
++ * @mask: event mask to trigger interrupt;
++ * each bit:
++ * 0 = ignore event
++ * 1 = consider event for asserting IRQ
++ *
++ * Every interrupt can have up to 32 causes and the interrupt model supports
++ * masking/unmasking each cause independently
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpci_set_irq_mask(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ uint32_t mask);
++
++/**
++ * dpci_get_irq_mask() - Get interrupt mask.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPCI object
++ * @irq_index: The interrupt index to configure
++ * @mask: Returned event mask to trigger interrupt
++ *
++ * Every interrupt can have up to 32 causes and the interrupt model supports
++ * masking/unmasking each cause independently
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpci_get_irq_mask(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ uint32_t *mask);
++
++/**
++ * dpci_get_irq_status() - Get the current status of any pending interrupts.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPCI object
++ * @irq_index: The interrupt index to configure
++ * @status: Returned interrupts status - one bit per cause:
++ * 0 = no interrupt pending
++ * 1 = interrupt pending
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpci_get_irq_status(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ uint32_t *status);
++
++/**
++ * dpci_clear_irq_status() - Clear a pending interrupt's status
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPCI object
++ * @irq_index: The interrupt index to configure
++ * @status: bits to clear (W1C) - one bit per cause:
++ * 0 = don't change
++ * 1 = clear status bit
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpci_clear_irq_status(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ uint32_t status);
++
++/**
++ * struct dpci_attr - Structure representing DPCI attributes
++ * @id: DPCI object ID
++ * @version: DPCI version
++ * @num_of_priorities: Number of receive priorities
++ */
++struct dpci_attr {
++ int id;
++ /**
++ * struct version - Structure representing DPCI version
++ * @major: DPCI major version
++ * @minor: DPCI minor version
++ */
++ struct {
++ uint16_t major;
++ uint16_t minor;
++ } version;
++ uint8_t num_of_priorities;
++};
++
++/**
++ * dpci_get_attributes() - Retrieve DPCI attributes.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPCI object
++ * @attr: Returned object's attributes
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpci_get_attributes(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ struct dpci_attr *attr);
++
++/**
++ * struct dpci_peer_attr - Structure representing the peer DPCI attributes
++ * @peer_id: DPCI peer id; if no peer is connected returns (-1)
++ * @num_of_priorities: The peer's number of receive priorities; determines the
++ * number of transmit priorities for the local DPCI object
++ */
++struct dpci_peer_attr {
++ int peer_id;
++ uint8_t num_of_priorities;
++};
++
++/**
++ * dpci_get_peer_attributes() - Retrieve peer DPCI attributes.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPCI object
++ * @attr: Returned peer attributes
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpci_get_peer_attributes(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ struct dpci_peer_attr *attr);
++
++/**
++ * dpci_get_link_state() - Retrieve the DPCI link state.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPCI object
++ * @up: Returned link state; returns '1' if link is up, '0' otherwise
++ *
++ * DPCI can be connected to another DPCI, together they
++ * create a 'link'. In order to use the DPCI Tx and Rx queues,
++ * both objects must be enabled.
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpci_get_link_state(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ int *up);
++
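For illustration, peer discovery and link check usually go together: read the peer attributes to learn how many Tx priorities exist, then poll the link state. Only the declarations above are used; mc_io, cmd_flags and token are assumed from dpci_open().

    struct dpci_peer_attr peer;
    int up = 0;
    int err;

    err = dpci_get_peer_attributes(mc_io, cmd_flags, token, &peer);
    if (err == 0 && peer.peer_id != -1) {
        /* peer.num_of_priorities Tx queues are available towards the peer */
        err = dpci_get_link_state(mc_io, cmd_flags, token, &up);
    }
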
++/**
++ * enum dpci_dest - DPCI destination types
++ * @DPCI_DEST_NONE: Unassigned destination; The queue is set in parked mode
++ * and does not generate FQDAN notifications; user is
++ * expected to dequeue from the queue based on polling or
++ * other user-defined method
++ * @DPCI_DEST_DPIO: The queue is set in schedule mode and generates FQDAN
++ * notifications to the specified DPIO; user is expected
++ * to dequeue from the queue only after notification is
++ * received
++ * @DPCI_DEST_DPCON: The queue is set in schedule mode and does not generate
++ * FQDAN notifications, but is connected to the specified
++ * DPCON object;
++ * user is expected to dequeue from the DPCON channel
++ */
++enum dpci_dest {
++ DPCI_DEST_NONE = 0,
++ DPCI_DEST_DPIO = 1,
++ DPCI_DEST_DPCON = 2
++};
++
++/**
++ * struct dpci_dest_cfg - Structure representing DPCI destination configuration
++ * @dest_type: Destination type
++ * @dest_id: Either DPIO ID or DPCON ID, depending on the destination type
++ * @priority: Priority selection within the DPIO or DPCON channel; valid
++ * values are 0-1 or 0-7, depending on the number of priorities
++ * in that channel; not relevant for 'DPCI_DEST_NONE' option
++ */
++struct dpci_dest_cfg {
++ enum dpci_dest dest_type;
++ int dest_id;
++ uint8_t priority;
++};
++
++/** DPCI queue modification options */
++
++/**
++ * Select to modify the user's context associated with the queue
++ */
++#define DPCI_QUEUE_OPT_USER_CTX 0x00000001
++
++/**
++ * Select to modify the queue's destination
++ */
++#define DPCI_QUEUE_OPT_DEST 0x00000002
++
++/**
++ * struct dpci_rx_queue_cfg - Structure representing RX queue configuration
++ * @options: Flags representing the suggested modifications to the queue;
++ * Use any combination of 'DPCI_QUEUE_OPT_<X>' flags
++ * @user_ctx: User context value provided in the frame descriptor of each
++ * dequeued frame;
++ * valid only if 'DPCI_QUEUE_OPT_USER_CTX' is contained in
++ * 'options'
++ * @dest_cfg: Queue destination parameters;
++ * valid only if 'DPCI_QUEUE_OPT_DEST' is contained in 'options'
++ */
++struct dpci_rx_queue_cfg {
++ uint32_t options;
++ uint64_t user_ctx;
++ struct dpci_dest_cfg dest_cfg;
++};
++
++/**
++ * dpci_set_rx_queue() - Set Rx queue configuration
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPCI object
++ * @priority: Select the queue relative to number of
++ * priorities configured at DPCI creation; use
++ * DPCI_ALL_QUEUES to configure all Rx queues
++ * identically.
++ * @cfg: Rx queue configuration
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpci_set_rx_queue(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t priority,
++ const struct dpci_rx_queue_cfg *cfg);
++
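A minimal sketch of pointing every DPCI Rx queue at a DPCON channel using the option flags and destination structure defined above; dpcon_id and the user context value are placeholders, and mc_io, cmd_flags and token are assumed from dpci_open().

    struct dpci_rx_queue_cfg rx_cfg = {
        .options  = DPCI_QUEUE_OPT_USER_CTX | DPCI_QUEUE_OPT_DEST,
        .user_ctx = 0x1234,                     /* placeholder; echoed in each dequeued frame */
        .dest_cfg = {
            .dest_type = DPCI_DEST_DPCON,
            .dest_id   = dpcon_id,              /* placeholder DPCON object id */
            .priority  = 0,
        },
    };

    /* DPCI_ALL_QUEUES applies the same configuration to every Rx queue */
    int err = dpci_set_rx_queue(mc_io, cmd_flags, token, DPCI_ALL_QUEUES, &rx_cfg);
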
++/**
++ * struct dpci_rx_queue_attr - Structure representing Rx queue attributes
++ * @user_ctx: User context value provided in the frame descriptor of each
++ * dequeued frame
++ * @dest_cfg: Queue destination configuration
++ * @fqid: Virtual FQID value to be used for dequeue operations
++ */
++struct dpci_rx_queue_attr {
++ uint64_t user_ctx;
++ struct dpci_dest_cfg dest_cfg;
++ uint32_t fqid;
++};
++
++/**
++ * dpci_get_rx_queue() - Retrieve Rx queue attributes.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPCI object
++ * @priority: Select the queue relative to number of
++ * priorities configured at DPCI creation
++ * @attr: Returned Rx queue attributes
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpci_get_rx_queue(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t priority,
++ struct dpci_rx_queue_attr *attr);
++
++/**
++ * struct dpci_tx_queue_attr - Structure representing attributes of Tx queues
++ * @fqid: Virtual FQID to be used for sending frames to peer DPCI;
++ * returns 'DPCI_FQID_NOT_VALID' if no peer is connected or if
++ * the selected priority exceeds the number of priorities of the
++ * peer DPCI object
++ */
++struct dpci_tx_queue_attr {
++ uint32_t fqid;
++};
++
++/**
++ * dpci_get_tx_queue() - Retrieve Tx queue attributes.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPCI object
++ * @priority: Select the queue relative to number of
++ * priorities of the peer DPCI object
++ * @attr: Returned Tx queue attributes
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpci_get_tx_queue(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t priority,
++ struct dpci_tx_queue_attr *attr);
++
++#endif /* __FSL_DPCI_H */
+diff --git a/drivers/net/dpaa2/mc/fsl_dpci_cmd.h b/drivers/net/dpaa2/mc/fsl_dpci_cmd.h
+new file mode 100644
+index 0000000..f45e435
+--- /dev/null
++++ b/drivers/net/dpaa2/mc/fsl_dpci_cmd.h
+@@ -0,0 +1,200 @@
++/* Copyright 2013-2015 Freescale Semiconductor Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of the above-listed copyright holders nor the
++ * names of any contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
++ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
++ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
++ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
++ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
++ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
++ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
++ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
++ * POSSIBILITY OF SUCH DAMAGE.
++ */
++#ifndef _FSL_DPCI_CMD_H
++#define _FSL_DPCI_CMD_H
++
++/* DPCI Version */
++#define DPCI_VER_MAJOR 2
++#define DPCI_VER_MINOR 2
++
++/* Command IDs */
++#define DPCI_CMDID_CLOSE 0x800
++#define DPCI_CMDID_OPEN 0x807
++#define DPCI_CMDID_CREATE 0x907
++#define DPCI_CMDID_DESTROY 0x900
++
++#define DPCI_CMDID_ENABLE 0x002
++#define DPCI_CMDID_DISABLE 0x003
++#define DPCI_CMDID_GET_ATTR 0x004
++#define DPCI_CMDID_RESET 0x005
++#define DPCI_CMDID_IS_ENABLED 0x006
++
++#define DPCI_CMDID_SET_IRQ 0x010
++#define DPCI_CMDID_GET_IRQ 0x011
++#define DPCI_CMDID_SET_IRQ_ENABLE 0x012
++#define DPCI_CMDID_GET_IRQ_ENABLE 0x013
++#define DPCI_CMDID_SET_IRQ_MASK 0x014
++#define DPCI_CMDID_GET_IRQ_MASK 0x015
++#define DPCI_CMDID_GET_IRQ_STATUS 0x016
++#define DPCI_CMDID_CLEAR_IRQ_STATUS 0x017
++
++#define DPCI_CMDID_SET_RX_QUEUE 0x0e0
++#define DPCI_CMDID_GET_LINK_STATE 0x0e1
++#define DPCI_CMDID_GET_PEER_ATTR 0x0e2
++#define DPCI_CMDID_GET_RX_QUEUE 0x0e3
++#define DPCI_CMDID_GET_TX_QUEUE 0x0e4
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPCI_CMD_OPEN(cmd, dpci_id) \
++ MC_CMD_OP(cmd, 0, 0, 32, int, dpci_id)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPCI_CMD_CREATE(cmd, cfg) \
++ MC_CMD_OP(cmd, 0, 0, 8, uint8_t, cfg->num_of_priorities)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPCI_RSP_IS_ENABLED(cmd, en) \
++ MC_RSP_OP(cmd, 0, 0, 1, int, en)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPCI_CMD_SET_IRQ(cmd, irq_index, irq_cfg) \
++do { \
++ MC_CMD_OP(cmd, 0, 0, 8, uint8_t, irq_index);\
++ MC_CMD_OP(cmd, 0, 32, 32, uint32_t, irq_cfg->val);\
++ MC_CMD_OP(cmd, 1, 0, 64, uint64_t, irq_cfg->addr);\
++ MC_CMD_OP(cmd, 2, 0, 32, int, irq_cfg->irq_num); \
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPCI_CMD_GET_IRQ(cmd, irq_index) \
++ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPCI_RSP_GET_IRQ(cmd, type, irq_cfg) \
++do { \
++ MC_RSP_OP(cmd, 0, 0, 32, uint32_t, irq_cfg->val); \
++ MC_RSP_OP(cmd, 1, 0, 64, uint64_t, irq_cfg->addr);\
++ MC_RSP_OP(cmd, 2, 0, 32, int, irq_cfg->irq_num); \
++ MC_RSP_OP(cmd, 2, 32, 32, int, type); \
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPCI_CMD_SET_IRQ_ENABLE(cmd, irq_index, en) \
++do { \
++ MC_CMD_OP(cmd, 0, 0, 8, uint8_t, en); \
++ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPCI_CMD_GET_IRQ_ENABLE(cmd, irq_index) \
++ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPCI_RSP_GET_IRQ_ENABLE(cmd, en) \
++ MC_RSP_OP(cmd, 0, 0, 8, uint8_t, en)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPCI_CMD_SET_IRQ_MASK(cmd, irq_index, mask) \
++do { \
++ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, mask); \
++ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPCI_CMD_GET_IRQ_MASK(cmd, irq_index) \
++ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPCI_RSP_GET_IRQ_MASK(cmd, mask) \
++ MC_RSP_OP(cmd, 0, 0, 32, uint32_t, mask)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPCI_CMD_GET_IRQ_STATUS(cmd, irq_index, status) \
++do { \
++ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, status);\
++ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\
++} while (0)
++/* cmd, param, offset, width, type, arg_name */
++#define DPCI_RSP_GET_IRQ_STATUS(cmd, status) \
++ MC_RSP_OP(cmd, 0, 0, 32, uint32_t, status)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPCI_CMD_CLEAR_IRQ_STATUS(cmd, irq_index, status) \
++do { \
++ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, status); \
++ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPCI_RSP_GET_ATTR(cmd, attr) \
++do { \
++ MC_RSP_OP(cmd, 0, 0, 32, int, attr->id);\
++ MC_RSP_OP(cmd, 0, 48, 8, uint8_t, attr->num_of_priorities);\
++ MC_RSP_OP(cmd, 1, 0, 16, uint16_t, attr->version.major);\
++ MC_RSP_OP(cmd, 1, 16, 16, uint16_t, attr->version.minor);\
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPCI_RSP_GET_PEER_ATTR(cmd, attr) \
++do { \
++ MC_RSP_OP(cmd, 0, 0, 32, int, attr->peer_id);\
++ MC_RSP_OP(cmd, 1, 0, 8, uint8_t, attr->num_of_priorities);\
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPCI_RSP_GET_LINK_STATE(cmd, up) \
++ MC_RSP_OP(cmd, 0, 0, 1, int, up)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPCI_CMD_SET_RX_QUEUE(cmd, priority, cfg) \
++do { \
++ MC_CMD_OP(cmd, 0, 0, 32, int, cfg->dest_cfg.dest_id);\
++ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, cfg->dest_cfg.priority);\
++ MC_CMD_OP(cmd, 0, 40, 8, uint8_t, priority);\
++ MC_CMD_OP(cmd, 0, 48, 4, enum dpci_dest, cfg->dest_cfg.dest_type);\
++ MC_CMD_OP(cmd, 1, 0, 64, uint64_t, cfg->user_ctx);\
++ MC_CMD_OP(cmd, 2, 0, 32, uint32_t, cfg->options);\
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPCI_CMD_GET_RX_QUEUE(cmd, priority) \
++ MC_CMD_OP(cmd, 0, 40, 8, uint8_t, priority)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPCI_RSP_GET_RX_QUEUE(cmd, attr) \
++do { \
++ MC_RSP_OP(cmd, 0, 0, 32, int, attr->dest_cfg.dest_id);\
++ MC_RSP_OP(cmd, 0, 32, 8, uint8_t, attr->dest_cfg.priority);\
++ MC_RSP_OP(cmd, 0, 48, 4, enum dpci_dest, attr->dest_cfg.dest_type);\
++ MC_RSP_OP(cmd, 1, 0, 64, uint64_t, attr->user_ctx);\
++ MC_RSP_OP(cmd, 2, 0, 32, uint32_t, attr->fqid);\
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPCI_CMD_GET_TX_QUEUE(cmd, priority) \
++ MC_CMD_OP(cmd, 0, 40, 8, uint8_t, priority)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPCI_RSP_GET_TX_QUEUE(cmd, attr) \
++ MC_RSP_OP(cmd, 0, 32, 32, uint32_t, attr->fqid)
++
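++/*
++ * Illustrative sketch only -- not part of this command header.  It shows the
++ * pattern the matching dpci.c routine follows when consuming the shim macros
++ * above: the CMD macro packs a 'struct mc_command', mc_send_command() sends
++ * it to the MC, and the RSP macro unpacks the reply.  'struct mc_command',
++ * mc_encode_cmd_header() and mc_send_command() are assumed to be provided by
++ * the generic MC command/system headers, and 'struct dpci_rx_queue_attr' by
++ * the companion fsl_dpci.h.
++ */
++static inline int dpci_example_get_rx_queue(struct fsl_mc_io *mc_io,
++					    uint32_t cmd_flags,
++					    uint16_t token,
++					    uint8_t priority,
++					    struct dpci_rx_queue_attr *attr)
++{
++	struct mc_command cmd = { 0 };
++	int err;
++
++	/* Build the command: header first, then the packed request fields. */
++	cmd.header = mc_encode_cmd_header(DPCI_CMDID_GET_RX_QUEUE,
++					  cmd_flags, token);
++	DPCI_CMD_GET_RX_QUEUE(cmd, priority);
++
++	/* Send it to the MC and bail out on failure. */
++	err = mc_send_command(mc_io, &cmd);
++	if (err)
++		return err;
++
++	/* Unpack the response into the caller-supplied attribute struct. */
++	DPCI_RSP_GET_RX_QUEUE(cmd, attr);
++	return 0;
++}
++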
++#endif /* _FSL_DPCI_CMD_H */
+diff --git a/drivers/net/dpaa2/mc/fsl_dpcon.h b/drivers/net/dpaa2/mc/fsl_dpcon.h
+new file mode 100644
+index 0000000..2555be5
+--- /dev/null
++++ b/drivers/net/dpaa2/mc/fsl_dpcon.h
+@@ -0,0 +1,407 @@
++/* Copyright 2013-2015 Freescale Semiconductor Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of the above-listed copyright holders nor the
++ * names of any contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
++ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
++ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
++ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
++ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
++ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
++ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
++ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
++ * POSSIBILITY OF SUCH DAMAGE.
++ */
++#ifndef __FSL_DPCON_H
++#define __FSL_DPCON_H
++
++/* Data Path Concentrator API
++ * Contains initialization APIs and runtime control APIs for DPCON
++ */
++
++struct fsl_mc_io;
++
++/** General DPCON macros */
++
++/**
++ * Use it to disable notifications; see dpcon_set_notification()
++ */
++#define DPCON_INVALID_DPIO_ID (int)(-1)
++
++/**
++ * dpcon_open() - Open a control session for the specified object
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @dpcon_id: DPCON unique ID
++ * @token: Returned token; use in subsequent API calls
++ *
++ * This function can be used to open a control session for an
++ * already created object; an object may have been declared in
++ * the DPL or by calling the dpcon_create() function.
++ * This function returns a unique authentication token,
++ * associated with the specific object ID and the specific MC
++ * portal; this token must be used in all subsequent commands for
++ * this specific object.
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpcon_open(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ int dpcon_id,
++ uint16_t *token);
++
++/**
++ * dpcon_close() - Close the control session of the object
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPCON object
++ *
++ * After this function is called, no further operations are
++ * allowed on the object without opening a new control session.
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpcon_close(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token);
++
++/**
++ * struct dpcon_cfg - Structure representing DPCON configuration
++ * @num_priorities: Number of priorities for the DPCON channel (1-8)
++ */
++struct dpcon_cfg {
++ uint8_t num_priorities;
++};
++
++/**
++ * dpcon_create() - Create the DPCON object.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @cfg: Configuration structure
++ * @token: Returned token; use in subsequent API calls
++ *
++ * Create the DPCON object, allocate required resources and
++ * perform required initialization.
++ *
++ * The object can be created either by declaring it in the
++ * DPL file, or by calling this function.
++ *
++ * This function returns a unique authentication token,
++ * associated with the specific object ID and the specific MC
++ * portal; this token must be used in all subsequent calls to
++ * this specific object. For objects that are created using the
++ * DPL file, call dpcon_open() function to get an authentication
++ * token first.
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpcon_create(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ const struct dpcon_cfg *cfg,
++ uint16_t *token);
++
++/**
++ * dpcon_destroy() - Destroy the DPCON object and release all its resources.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPCON object
++ *
++ * Return: '0' on Success; error code otherwise.
++ */
++int dpcon_destroy(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token);
++
++/**
++ * dpcon_enable() - Enable the DPCON
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPCON object
++ *
++ * Return: '0' on Success; Error code otherwise
++ */
++int dpcon_enable(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token);
++
++/**
++ * dpcon_disable() - Disable the DPCON
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPCON object
++ *
++ * Return: '0' on Success; Error code otherwise
++ */
++int dpcon_disable(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token);
++
++/**
++ * dpcon_is_enabled() - Check if the DPCON is enabled.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPCON object
++ * @en: Returns '1' if object is enabled; '0' otherwise
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpcon_is_enabled(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ int *en);
++
++/**
++ * dpcon_reset() - Reset the DPCON, returns the object to initial state.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPCON object
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpcon_reset(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token);
++
++/**
++ * struct dpcon_irq_cfg - IRQ configuration
++ * @addr: Address that must be written to signal a message-based interrupt
++ * @val: Value to write into irq_addr address
++ * @irq_num: A user defined number associated with this IRQ
++ */
++struct dpcon_irq_cfg {
++ uint64_t addr;
++ uint32_t val;
++ int irq_num;
++};
++
++/**
++ * dpcon_set_irq() - Set IRQ information for the DPCON to trigger an interrupt.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPCON object
++ * @irq_index: Identifies the interrupt index to configure
++ * @irq_cfg: IRQ configuration
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpcon_set_irq(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ struct dpcon_irq_cfg *irq_cfg);
++
++/**
++ * dpcon_get_irq() - Get IRQ information from the DPCON.
++ *
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPCON object
++ * @irq_index: The interrupt index to configure
++ * @type: Interrupt type: 0 represents message interrupt
++ * type (both irq_addr and irq_val are valid)
++ * @irq_cfg: IRQ attributes
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpcon_get_irq(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ int *type,
++ struct dpcon_irq_cfg *irq_cfg);
++
++/**
++ * dpcon_set_irq_enable() - Set overall interrupt state.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPCON object
++ * @irq_index: The interrupt index to configure
++ * @en: Interrupt state - enable = 1, disable = 0
++ *
++ * Allows GPP software to control when interrupts are generated.
++ * Each interrupt can have up to 32 causes. The enable/disable controls the
++ * overall interrupt state: if the interrupt is disabled, no cause will
++ * trigger an interrupt.
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpcon_set_irq_enable(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ uint8_t en);
++
++/**
++ * dpcon_get_irq_enable() - Get overall interrupt state.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPCON object
++ * @irq_index: The interrupt index to configure
++ * @en: Returned interrupt state - enable = 1, disable = 0
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpcon_get_irq_enable(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ uint8_t *en);
++
++/**
++ * dpcon_set_irq_mask() - Set interrupt mask.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPCON object
++ * @irq_index: The interrupt index to configure
++ * @mask: Event mask to trigger interrupt;
++ * each bit:
++ * 0 = ignore event
++ * 1 = consider event for asserting IRQ
++ *
++ * Every interrupt can have up to 32 causes and the interrupt model supports
++ * masking/unmasking each cause independently
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpcon_set_irq_mask(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ uint32_t mask);
++
++/**
++ * dpcon_get_irq_mask() - Get interrupt mask.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPCON object
++ * @irq_index: The interrupt index to configure
++ * @mask: Returned event mask to trigger interrupt
++ *
++ * Every interrupt can have up to 32 causes and the interrupt model supports
++ * masking/unmasking each cause independently
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpcon_get_irq_mask(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ uint32_t *mask);
++
++/**
++ * dpcon_get_irq_status() - Get the current status of any pending interrupts.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPCON object
++ * @irq_index: The interrupt index to configure
++ * @status: interrupts status - one bit per cause:
++ * 0 = no interrupt pending
++ * 1 = interrupt pending
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpcon_get_irq_status(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ uint32_t *status);
++
++/**
++ * dpcon_clear_irq_status() - Clear a pending interrupt's status
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPCON object
++ * @irq_index: The interrupt index to configure
++ * @status: bits to clear (W1C) - one bit per cause:
++ * 0 = don't change
++ * 1 = clear status bit
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpcon_clear_irq_status(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ uint32_t status);
++
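++/*
++ * Illustrative sketch only: a typical ordering of the IRQ calls above when
++ * arming message-based interrupt index 0 of a DPCON.  The MSI address/value
++ * pair and the 'mc_io'/'token' handles are assumed to come from the caller's
++ * platform setup; command flags are left at 0.
++ */
++static inline int dpcon_example_arm_irq(struct fsl_mc_io *mc_io,
++					uint16_t token,
++					uint64_t msi_addr,
++					uint32_t msi_val)
++{
++	struct dpcon_irq_cfg irq_cfg;
++	int err;
++
++	irq_cfg.addr = msi_addr;	/* written by the MC to raise the IRQ */
++	irq_cfg.val = msi_val;		/* value written to that address */
++	irq_cfg.irq_num = 0;		/* user-defined cookie */
++
++	err = dpcon_set_irq(mc_io, 0, token, 0, &irq_cfg);
++	if (err)
++		return err;
++	/* Unmask all 32 potential causes, then enable the interrupt line. */
++	err = dpcon_set_irq_mask(mc_io, 0, token, 0, 0xffffffff);
++	if (err)
++		return err;
++	return dpcon_set_irq_enable(mc_io, 0, token, 0, 1);
++}
++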
++/**
++ * struct dpcon_attr - Structure representing DPCON attributes
++ * @id: DPCON object ID
++ * @version: DPCON version
++ * @qbman_ch_id: Channel ID to be used by dequeue operation
++ * @num_priorities: Number of priorities for the DPCON channel (1-8)
++ */
++struct dpcon_attr {
++ int id;
++ /**
++ * struct version - DPCON version
++ * @major: DPCON major version
++ * @minor: DPCON minor version
++ */
++ struct {
++ uint16_t major;
++ uint16_t minor;
++ } version;
++ uint16_t qbman_ch_id;
++ uint8_t num_priorities;
++};
++
++/**
++ * dpcon_get_attributes() - Retrieve DPCON attributes.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPCON object
++ * @attr: Object's attributes
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpcon_get_attributes(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ struct dpcon_attr *attr);
++
++/**
++ * struct dpcon_notification_cfg - Structure representing notification parameters
++ * @dpio_id: DPIO object ID; must be configured with a notification channel;
++ * to disable notifications set it to 'DPCON_INVALID_DPIO_ID';
++ * @priority: Priority selection within the DPIO channel; valid values
++ * are 0-7, depending on the number of priorities in that channel
++ * @user_ctx: User context value provided with each CDAN message
++ */
++struct dpcon_notification_cfg {
++ int dpio_id;
++ uint8_t priority;
++ uint64_t user_ctx;
++};
++
++/**
++ * dpcon_set_notification() - Set DPCON notification destination
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPCON object
++ * @cfg: Notification parameters
++ *
++ * Return: '0' on Success; Error code otherwise
++ */
++int dpcon_set_notification(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ struct dpcon_notification_cfg *cfg);
++
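++/*
++ * Illustrative sketch only: the usual life-cycle of a DPCON channel used for
++ * dequeue notifications -- open, query, point it at a DPIO, enable.  The
++ * 'dpcon_id' and 'dpio_id' values are assumed to come from the container/DPL
++ * layout; command flags are left at 0 and errors unwind through dpcon_close().
++ */
++static inline int dpcon_example_setup(struct fsl_mc_io *mc_io,
++				      int dpcon_id,
++				      int dpio_id,
++				      uint64_t user_ctx,
++				      uint16_t *token)
++{
++	struct dpcon_notification_cfg notif_cfg;
++	struct dpcon_attr attr;
++	int err;
++
++	err = dpcon_open(mc_io, 0, dpcon_id, token);
++	if (err)
++		return err;
++
++	/* attr.qbman_ch_id is the channel later referenced by dequeues. */
++	err = dpcon_get_attributes(mc_io, 0, *token, &attr);
++	if (err)
++		goto fail;
++
++	notif_cfg.dpio_id = dpio_id;	/* DPCON_INVALID_DPIO_ID disables CDANs */
++	notif_cfg.priority = 0;		/* 0 .. attr.num_priorities - 1 */
++	notif_cfg.user_ctx = user_ctx;	/* echoed back in each CDAN message */
++	err = dpcon_set_notification(mc_io, 0, *token, &notif_cfg);
++	if (err)
++		goto fail;
++
++	err = dpcon_enable(mc_io, 0, *token);
++	if (err)
++		goto fail;
++	return 0;
++
++fail:
++	(void)dpcon_close(mc_io, 0, *token);
++	return err;
++}
++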
++#endif /* __FSL_DPCON_H */
+diff --git a/drivers/net/dpaa2/mc/fsl_dpcon_cmd.h b/drivers/net/dpaa2/mc/fsl_dpcon_cmd.h
+new file mode 100644
+index 0000000..ecb40d0
+--- /dev/null
++++ b/drivers/net/dpaa2/mc/fsl_dpcon_cmd.h
+@@ -0,0 +1,162 @@
++/* Copyright 2013-2015 Freescale Semiconductor Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of the above-listed copyright holders nor the
++ * names of any contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
++ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
++ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
++ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
++ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
++ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
++ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
++ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
++ * POSSIBILITY OF SUCH DAMAGE.
++ */
++#ifndef _FSL_DPCON_CMD_H
++#define _FSL_DPCON_CMD_H
++
++/* DPCON Version */
++#define DPCON_VER_MAJOR 2
++#define DPCON_VER_MINOR 2
++
++/* Command IDs */
++#define DPCON_CMDID_CLOSE 0x800
++#define DPCON_CMDID_OPEN 0x808
++#define DPCON_CMDID_CREATE 0x908
++#define DPCON_CMDID_DESTROY 0x900
++
++#define DPCON_CMDID_ENABLE 0x002
++#define DPCON_CMDID_DISABLE 0x003
++#define DPCON_CMDID_GET_ATTR 0x004
++#define DPCON_CMDID_RESET 0x005
++#define DPCON_CMDID_IS_ENABLED 0x006
++
++#define DPCON_CMDID_SET_IRQ 0x010
++#define DPCON_CMDID_GET_IRQ 0x011
++#define DPCON_CMDID_SET_IRQ_ENABLE 0x012
++#define DPCON_CMDID_GET_IRQ_ENABLE 0x013
++#define DPCON_CMDID_SET_IRQ_MASK 0x014
++#define DPCON_CMDID_GET_IRQ_MASK 0x015
++#define DPCON_CMDID_GET_IRQ_STATUS 0x016
++#define DPCON_CMDID_CLEAR_IRQ_STATUS 0x017
++
++#define DPCON_CMDID_SET_NOTIFICATION 0x100
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPCON_CMD_OPEN(cmd, dpcon_id) \
++ MC_CMD_OP(cmd, 0, 0, 32, int, dpcon_id)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPCON_CMD_CREATE(cmd, cfg) \
++ MC_CMD_OP(cmd, 0, 0, 8, uint8_t, cfg->num_priorities)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPCON_RSP_IS_ENABLED(cmd, en) \
++ MC_RSP_OP(cmd, 0, 0, 1, int, en)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPCON_CMD_SET_IRQ(cmd, irq_index, irq_cfg) \
++do { \
++ MC_CMD_OP(cmd, 0, 0, 8, uint8_t, irq_index);\
++ MC_CMD_OP(cmd, 0, 32, 32, uint32_t, irq_cfg->val);\
++ MC_CMD_OP(cmd, 1, 0, 64, uint64_t, irq_cfg->addr);\
++ MC_CMD_OP(cmd, 2, 0, 32, int, irq_cfg->irq_num); \
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPCON_CMD_GET_IRQ(cmd, irq_index) \
++ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPCON_RSP_GET_IRQ(cmd, type, irq_cfg) \
++do { \
++ MC_RSP_OP(cmd, 0, 0, 32, uint32_t, irq_cfg->val);\
++ MC_RSP_OP(cmd, 1, 0, 64, uint64_t, irq_cfg->addr);\
++ MC_RSP_OP(cmd, 2, 0, 32, int, irq_cfg->irq_num); \
++ MC_RSP_OP(cmd, 2, 32, 32, int, type);\
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPCON_CMD_SET_IRQ_ENABLE(cmd, irq_index, en) \
++do { \
++ MC_CMD_OP(cmd, 0, 0, 8, uint8_t, en); \
++ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPCON_CMD_GET_IRQ_ENABLE(cmd, irq_index) \
++ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPCON_RSP_GET_IRQ_ENABLE(cmd, en) \
++ MC_RSP_OP(cmd, 0, 0, 8, uint8_t, en)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPCON_CMD_SET_IRQ_MASK(cmd, irq_index, mask) \
++do { \
++ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, mask); \
++ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPCON_CMD_GET_IRQ_MASK(cmd, irq_index) \
++ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPCON_RSP_GET_IRQ_MASK(cmd, mask) \
++ MC_RSP_OP(cmd, 0, 0, 32, uint32_t, mask)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPCON_CMD_GET_IRQ_STATUS(cmd, irq_index, status) \
++do { \
++ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, status);\
++ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPCON_RSP_GET_IRQ_STATUS(cmd, status) \
++ MC_RSP_OP(cmd, 0, 0, 32, uint32_t, status)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPCON_CMD_CLEAR_IRQ_STATUS(cmd, irq_index, status) \
++do { \
++ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, status); \
++ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPCON_RSP_GET_ATTR(cmd, attr) \
++do { \
++ MC_RSP_OP(cmd, 0, 0, 32, int, attr->id);\
++ MC_RSP_OP(cmd, 0, 32, 16, uint16_t, attr->qbman_ch_id);\
++ MC_RSP_OP(cmd, 0, 48, 8, uint8_t, attr->num_priorities);\
++ MC_RSP_OP(cmd, 1, 0, 16, uint16_t, attr->version.major);\
++ MC_RSP_OP(cmd, 1, 16, 16, uint16_t, attr->version.minor);\
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPCON_CMD_SET_NOTIFICATION(cmd, cfg) \
++do { \
++ MC_CMD_OP(cmd, 0, 0, 32, int, cfg->dpio_id);\
++ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, cfg->priority);\
++ MC_CMD_OP(cmd, 1, 0, 64, uint64_t, cfg->user_ctx);\
++} while (0)
++
++#endif /* _FSL_DPCON_CMD_H */
+diff --git a/drivers/net/dpaa2/mc/fsl_dpdbg.h b/drivers/net/dpaa2/mc/fsl_dpdbg.h
+new file mode 100644
+index 0000000..ead22e8
+--- /dev/null
++++ b/drivers/net/dpaa2/mc/fsl_dpdbg.h
+@@ -0,0 +1,635 @@
++/* Copyright 2013-2015 Freescale Semiconductor Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of the above-listed copyright holders nor the
++ * names of any contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
++ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
++ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
++ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
++ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
++ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
++ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
++ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
++ * POSSIBILITY OF SUCH DAMAGE.
++ */
++#ifndef __FSL_DPDBG_H
++#define __FSL_DPDBG_H
++
++#include <fsl_dpkg.h>
++#include <fsl_dpmac.h>
++#include <fsl_dpni.h>
++
++/* Data Path Debug API
++ * Contains initialization APIs and runtime control APIs for DPDBG
++ */
++
++struct fsl_mc_io;
++
++/**
++ * dpdbg_open() - Open a control session for the specified object.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @dpdbg_id: DPDBG unique ID
++ * @token: Returned token; use in subsequent API calls
++ *
++ * This function can be used to open a control session for an
++ * already created object;
++ * This function returns a unique authentication token,
++ * associated with the specific object ID and the specific MC
++ * portal; this token must be used in all subsequent commands for
++ * this specific object
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpdbg_open(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ int dpdbg_id,
++ uint16_t *token);
++
++/**
++ * dpdbg_close() - Close the control session of the object
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPDBG object
++ *
++ * After this function is called, no further operations are
++ * allowed on the object without opening a new control session.
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpdbg_close(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token);
++
++/**
++ * struct dpdbg_attr - Structure representing DPDBG attributes
++ * @id: DPDBG object ID
++ * @version: DPDBG version
++ */
++struct dpdbg_attr {
++ int id;
++ /**
++ * struct version - Structure representing DPDBG version
++ * @major: DPDBG major version
++ * @minor: DPDBG minor version
++ */
++ struct {
++ uint16_t major;
++ uint16_t minor;
++ } version;
++};
++
++/**
++ * dpdbg_get_attributes - Retrieve DPDBG attributes.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPDBG object
++ * @attr: Returned object's attributes
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpdbg_get_attributes(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ struct dpdbg_attr *attr);
++
++/**
++ * struct dpdbg_dpni_info - Info of DPNI
++ * @max_senders: Maximum number of different senders; used as the number
++ * of dedicated Tx flows; Non-power-of-2 values are rounded
++ * up to the next power-of-2 value as hardware demands it;
++ * '0' will be treated as '1'
++ * @qdid: Virtual QDID.
++ * @err_fqid: Virtual FQID for error queues
++ * @tx_conf_fqid: Virtual FQID for global TX confirmation queue
++ */
++struct dpdbg_dpni_info {
++ uint8_t max_senders;
++ uint32_t qdid;
++ uint32_t err_fqid;
++ uint32_t tx_conf_fqid;
++};
++
++/**
++ * dpdbg_get_dpni_info() - Retrieve info for a specific DPNI
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPDBG object
++ * @dpni_id: The requested DPNI ID
++ * @info: The returned info
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpdbg_get_dpni_info(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ int dpni_id,
++ struct dpdbg_dpni_info *info);
++
++/**
++ * dpdbg_get_dpni_priv_tx_conf_fqid() - Retrieve the virtual TX confirmation
++ * queue FQID of the required DPNI
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPDBG object
++ * @dpni_id: The requested DPNI ID
++ * @sender_id: The requested sender ID
++ * @fqid: The returned virtual private TX confirmation FQID.
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpdbg_get_dpni_priv_tx_conf_fqid(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ int dpni_id,
++ uint8_t sender_id,
++ uint32_t *fqid);
++
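++/*
++ * Illustrative sketch only: query the virtual identifiers of a DPNI and then
++ * the private Tx-confirmation FQID of one of its senders.  'mc_io' and
++ * 'token' are assumed to come from dpdbg_open() above; command flags stay 0.
++ */
++static inline int dpdbg_example_dpni_fqids(struct fsl_mc_io *mc_io,
++					   uint16_t token,
++					   int dpni_id,
++					   uint8_t sender_id,
++					   uint32_t *priv_tx_conf_fqid)
++{
++	struct dpdbg_dpni_info info;
++	int err;
++
++	err = dpdbg_get_dpni_info(mc_io, 0, token, dpni_id, &info);
++	if (err)
++		return err;
++	/* info.qdid, info.err_fqid and info.tx_conf_fqid are now valid. */
++
++	return dpdbg_get_dpni_priv_tx_conf_fqid(mc_io, 0, token, dpni_id,
++						sender_id, priv_tx_conf_fqid);
++}
++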
++/**
++ * struct dpdbg_dpcon_info - Info of DPCON
++ * @ch_id: Channel ID
++ */
++struct dpdbg_dpcon_info {
++ uint32_t ch_id;
++};
++
++/**
++ * dpdbg_get_dpcon_info() - Retrieve info of DPCON
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPDBG object
++ * @dpcon_id: The requested DPCON ID
++ * @info: The returned info.
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpdbg_get_dpcon_info(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ int dpcon_id,
++ struct dpdbg_dpcon_info *info);
++
++/**
++ * struct dpdbg_dpbp_info - Info of DPBP
++ * @bpid: Virtual buffer pool ID
++ */
++struct dpdbg_dpbp_info {
++ uint32_t bpid;
++};
++
++/**
++ * dpdbg_get_dpbp_info() - Retrieve info of DPBP
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPDBG object
++ * @dpbp_id: The requested DPBP ID
++ * @info: The returned info.
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpdbg_get_dpbp_info(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ int dpbp_id,
++ struct dpdbg_dpbp_info *info);
++
++/**
++ * dpdbg_get_dpci_fqid() - Retrieve the virtual FQID of the required DPCI
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPDBG object
++ * @dpci_id: The requested DPCI ID
++ * @priority: Select the queue relative to number of priorities configured at
++ * DPCI creation
++ * @fqid: The returned virtual FQID.
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpdbg_get_dpci_fqid(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ int dpci_id,
++ uint8_t priority,
++ uint32_t *fqid);
++
++/**
++ * Maximum size for rule match (in bytes)
++ */
++#define DPDBG_MAX_RULE_SIZE 56
++/**
++ * Disable marking
++ */
++#define DPDBG_DISABLE_MARKING 0xFF
++
++/**
++ * dpdbg_prepare_ctlu_global_rule() - Prepare the extract parameters
++ * @dpkg_rule: defining a full Key Generation profile (rule)
++ * @rule_buf: Zeroed 256 bytes of memory before mapping it to DMA
++ *
++ * This function has to be called before dpdbg_set_ctlu_global_marking()
++ */
++int dpdbg_prepare_ctlu_global_rule(struct dpkg_profile_cfg *dpkg_rule,
++ uint8_t *rule_buf);
++
++/**
++ * struct dpdbg_rule_cfg - Rule configuration for table lookup
++ * @key_iova: I/O virtual address of the key (must be in DMA-able memory)
++ * @rule_iova: I/O virtual address of the rule (must be in DMA-able memory)
++ * @mask_iova: I/O virtual address of the mask (must be in DMA-able memory)
++ * @key_size: key and mask size (in bytes)
++ */
++struct dpdbg_rule_cfg {
++ uint64_t key_iova;
++ uint64_t mask_iova;
++ uint64_t rule_iova;
++ uint8_t key_size;
++};
++
++/**
++ * dpdbg_set_ctlu_global_marking() - Set marking for all match rule frames
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPDBG object
++ * @marking: The requested Debug marking
++ * @cfg: Marking rule to add
++ *
++ * Warning: must be called after dpdbg_prepare_ctlu_global_rule()
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpdbg_set_ctlu_global_marking(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t marking,
++ struct dpdbg_rule_cfg *cfg);
++
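++/*
++ * Illustrative sketch only: the two-step sequence referred to above --
++ * serialise a key-generation profile into a DMA-able buffer with
++ * dpdbg_prepare_ctlu_global_rule(), then hand its I/O virtual addresses to
++ * the MC with dpdbg_set_ctlu_global_marking().  Allocating the buffers and
++ * translating them to IOVAs is platform specific and assumed to be done by
++ * the caller.
++ */
++static inline int dpdbg_example_global_marking(struct fsl_mc_io *mc_io,
++					       uint16_t token,
++					       struct dpkg_profile_cfg *dpkg_rule,
++					       uint8_t *rule_buf, /* 256 B, zeroed, DMA-able */
++					       uint64_t rule_iova,
++					       uint64_t key_iova,
++					       uint64_t mask_iova,
++					       uint8_t key_size,
++					       uint8_t marking)
++{
++	struct dpdbg_rule_cfg cfg;
++	int err;
++
++	/* Step 1: encode the extract rule into the zeroed buffer. */
++	err = dpdbg_prepare_ctlu_global_rule(dpkg_rule, rule_buf);
++	if (err)
++		return err;
++
++	/* Step 2: pass the IOVAs of the rule, key and mask to the MC. */
++	cfg.rule_iova = rule_iova;
++	cfg.key_iova = key_iova;
++	cfg.mask_iova = mask_iova;
++	cfg.key_size = key_size;
++	return dpdbg_set_ctlu_global_marking(mc_io, 0, token, marking, &cfg);
++}
++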
++/**
++ * All traffic classes considered
++ */
++#define DPDBG_DPNI_ALL_TCS (uint8_t)(-1)
++/**
++ * All flows within traffic class considered
++ */
++#define DPDBG_DPNI_ALL_TC_FLOWS (uint8_t)(-1)
++/**
++ * All buffer pools considered
++ */
++#define DPDBG_DPNI_ALL_DPBP (uint8_t)(-1)
++
++/**
++ * struct dpdbg_dpni_rx_marking_cfg - Ingress frame configuration
++ * @tc_id: Traffic class ID (0-7); DPDBG_DPNI_ALL_TCS for all traffic classes.
++ * @flow_id: Rx flow id within the traffic class; use
++ * 'DPDBG_DPNI_ALL_TC_FLOWS' to set all flows within this tc_id;
++ * ignored if tc_id is set to 'DPDBG_DPNI_ALL_TCS';
++ * @dpbp_id: buffer pool ID; 'DPDBG_DPNI_ALL_DPBP' to set all DPBP
++ * @marking: Marking for match frames;
++ * 'DPDBG_DISABLE_MARKING' for disable marking
++ */
++struct dpdbg_dpni_rx_marking_cfg {
++ uint8_t tc_id;
++ uint16_t flow_id;
++ uint16_t dpbp_id;
++ uint8_t marking;
++};
++
++/**
++ * dpdbg_set_dpni_rx_marking() - Set Rx frame marking for DPNI
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPDBG object
++ * @dpni_id: The requested DPNI ID
++ * @cfg: RX frame marking configuration
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpdbg_set_dpni_rx_marking(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ int dpni_id,
++ struct dpdbg_dpni_rx_marking_cfg *cfg);
++
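++/*
++ * Illustrative sketch only: mark every frame received on one traffic class of
++ * a DPNI, across all of its Rx flows and buffer pools, using the wildcard
++ * selectors defined above.  Pass DPDBG_DISABLE_MARKING to undo the marking.
++ */
++static inline int dpdbg_example_mark_rx_tc(struct fsl_mc_io *mc_io,
++					   uint16_t token,
++					   int dpni_id,
++					   uint8_t tc_id,
++					   uint8_t marking)
++{
++	struct dpdbg_dpni_rx_marking_cfg cfg;
++
++	cfg.tc_id = tc_id;			/* or DPDBG_DPNI_ALL_TCS */
++	cfg.flow_id = DPDBG_DPNI_ALL_TC_FLOWS;	/* every flow in this TC */
++	cfg.dpbp_id = DPDBG_DPNI_ALL_DPBP;	/* every buffer pool */
++	cfg.marking = marking;
++	return dpdbg_set_dpni_rx_marking(mc_io, 0, token, dpni_id, &cfg);
++}
++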
++/* selects global confirmation queues */
++#define DPDBG_DPNI_GLOBAL_TX_CONF_QUEUE (uint16_t)(-1)
++
++/**
++ * dpdbg_set_dpni_tx_conf_marking() - Set Tx frame marking for DPNI
++ *
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPDBG object
++ * @dpni_id: The requested DPNI ID
++ * @sender_id: Sender Id for the confirmation queue;
++ * 'DPDBG_DPNI_GLOBAL_TX_CONF_QUEUE' for global confirmation queue
++ * @marking: The requested marking;
++ * 'DPDBG_DISABLE_MARKING' for disable marking
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpdbg_set_dpni_tx_conf_marking(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ int dpni_id,
++ uint16_t sender_id,
++ uint8_t marking);
++
++/**
++ * dpdbg_set_dpio_marking() - Set debug frame marking on enqueue
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPDBG object
++ * @dpio_id: The requested DPIO ID
++ * @marking: The requested marking;
++ * 'DPDBG_DISABLE_MARKING' for disable marking
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpdbg_set_dpio_marking(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ int dpio_id,
++ uint8_t marking);
++
++/**
++ * enum dpdbg_verbosity_level - Trace verbosity level
++ * @DPDBG_VERBOSITY_LEVEL_DISABLE: Trace disabled
++ * @DPDBG_VERBOSITY_LEVEL_TERSE: Terse trace
++ * @DPDBG_VERBOSITY_LEVEL_VERBOSE: Verbose trace
++ */
++enum dpdbg_verbosity_level {
++ DPDBG_VERBOSITY_LEVEL_DISABLE = 0,
++ DPDBG_VERBOSITY_LEVEL_TERSE,
++ DPDBG_VERBOSITY_LEVEL_VERBOSE
++};
++
++/**
++ * dpdbg_set_ctlu_global_trace() - Set global trace configuration for CTLU trace
++ *
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPDBG object
++ * @cfg: trace rule to add
++ *
++ * Warning: must be called after dpdbg_prepare_ctlu_global_rule()
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpdbg_set_ctlu_global_trace(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ struct dpdbg_rule_cfg *cfg);
++
++/**
++ * Number of DPIO trace points
++ */
++#define DPDBG_NUM_OF_DPIO_TRACE_POINTS 2
++
++/**
++ * enum dpdbg_dpio_trace_type - Define Trace point type
++ * @DPDBG_DPIO_TRACE_TYPE_ENQUEUE: This trace point triggers when an enqueue
++ * command, received via this portal,
++ * and containing a marked frame, is executed
++ * @DPDBG_DPIO_TRACE_TYPE_DEFERRED: This trace point triggers when the deferred
++ * enqueue of a marked frame received via this
++ * portal completes
++ */
++enum dpdbg_dpio_trace_type {
++ DPDBG_DPIO_TRACE_TYPE_ENQUEUE = 0,
++ DPDBG_DPIO_TRACE_TYPE_DEFERRED = 1
++};
++
++/**
++ * struct dpdbg_dpio_trace_cfg - Configure the behavior of a trace point
++ * when a frame marked with the specified DD code point is
++ * encountered
++ * @marking: this field will be written into the DD field of every FD
++ * enqueued in this DPIO.
++ * 'DPDBG_DISABLE_MARKING' for disable marking
++ * @verbosity: Verbosity level
++ * @enqueue_type: Enqueue trace point type defining a full Key Generation
++ * profile (rule)
++ */
++struct dpdbg_dpio_trace_cfg {
++ uint8_t marking;
++ enum dpdbg_verbosity_level verbosity;
++ enum dpdbg_dpio_trace_type enqueue_type;
++};
++
++/**
++ * dpdbg_set_dpio_trace() - Set trace for DPIO for every frame enqueued to
++ * the portal
++ *
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPDBG object
++ * @dpio_id: The requested DPIO ID
++ * @trace_point: Trace points configuration
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpdbg_set_dpio_trace(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ int dpio_id,
++ struct dpdbg_dpio_trace_cfg
++ trace_point[DPDBG_NUM_OF_DPIO_TRACE_POINTS]);
++
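++/*
++ * Illustrative sketch only: fill both DPIO trace points -- one for direct
++ * enqueues, one for deferred enqueues -- and program them in a single call.
++ * 'mc_io'/'token' are assumed to come from dpdbg_open(); 'marking' is the DD
++ * code point to watch for, or DPDBG_DISABLE_MARKING to switch tracing off.
++ */
++static inline int dpdbg_example_trace_dpio(struct fsl_mc_io *mc_io,
++					   uint16_t token,
++					   int dpio_id,
++					   uint8_t marking)
++{
++	struct dpdbg_dpio_trace_cfg tp[DPDBG_NUM_OF_DPIO_TRACE_POINTS];
++
++	tp[0].marking = marking;
++	tp[0].verbosity = DPDBG_VERBOSITY_LEVEL_TERSE;
++	tp[0].enqueue_type = DPDBG_DPIO_TRACE_TYPE_ENQUEUE;
++
++	tp[1].marking = marking;
++	tp[1].verbosity = DPDBG_VERBOSITY_LEVEL_VERBOSE;
++	tp[1].enqueue_type = DPDBG_DPIO_TRACE_TYPE_DEFERRED;
++
++	return dpdbg_set_dpio_trace(mc_io, 0, token, dpio_id, tp);
++}
++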
++/**
++ * struct dpdbg_dpni_rx_trace_cfg - Configure the behavior of a trace point
++ * when a frame marked with the specified DD code point is encountered
++ * @tc_id: Traffic class ID (0-7); DPDBG_DPNI_ALL_TCS for all traffic classes.
++ * @flow_id: Rx flow id within the traffic class; use
++ * 'DPDBG_DPNI_ALL_TC_FLOWS' to set all flows within this tc_id;
++ * ignored if tc_id is set to 'DPDBG_DPNI_ALL_TCS';
++ * @dpbp_id: buffer pool ID; 'DPDBG_DPNI_ALL_DPBP' to set all DPBP
++ * @marking: Marking for match frames;
++ * 'DPDBG_DISABLE_MARKING' for disable marking
++ */
++struct dpdbg_dpni_rx_trace_cfg {
++ uint8_t tc_id;
++ uint16_t flow_id;
++ uint16_t dpbp_id;
++ uint8_t marking;
++};
++
++/**
++ * dpdbg_set_dpni_rx_trace() - Set trace for DPNI ingress (WRIOP ingress).
++ * In case of multiple requests for different DPNIs, the trace
++ * will apply to the most recently requested DPNI.
++ *
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPDBG object
++ * @dpni_id: The requested DPNI ID
++ * @trace_cfg: Trace configuration
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpdbg_set_dpni_rx_trace(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ int dpni_id,
++ struct dpdbg_dpni_rx_trace_cfg *trace_cfg);
++
++/**
++ * All DPNI senders
++ */
++#define DPDBG_DPNI_ALL_SENDERS (uint16_t)(-1)
++
++/**
++ * struct dpdbg_dpni_tx_trace_cfg - Configure the behavior of a trace point when a
++ * frame marked with the specified DD code point is encountered
++ * @marking: The requested debug marking;
++ * 'DPDBG_DISABLE_MARKING' for disable marking
++ */
++struct dpdbg_dpni_tx_trace_cfg {
++ uint8_t marking;
++};
++
++/**
++ * dpdbg_set_dpni_tx_trace() - Set trace for DPNI dequeued frames
++ *
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPDBG object
++ * @dpni_id: The requested DPNI ID
++ * @sender_id: Sender ID; 'DPDBG_DPNI_ALL_SENDERS' for all senders
++ * @trace_cfg: Trace configuration
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpdbg_set_dpni_tx_trace(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ int dpni_id,
++ uint16_t sender_id,
++ struct dpdbg_dpni_tx_trace_cfg *trace_cfg);
++
++/**
++ * Number of DPCON trace points
++ */
++#define DPDBG_NUM_OF_DPCON_TRACE_POINTS 2
++
++/**
++ * struct dpdbg_dpcon_trace_cfg - Configure the behavior of a trace point when a
++ * frame marked with the specified DD code point is encountered
++ * @marking: The requested debug marking;
++ * 'DPDBG_DISABLE_MARKING' for disable marking
++ * @verbosity: Verbosity level
++ */
++struct dpdbg_dpcon_trace_cfg {
++ uint8_t marking;
++ enum dpdbg_verbosity_level verbosity;
++};
++
++/**
++ * dpdbg_set_dpcon_trace() - Set trace for DPCON when a frame marked with a
++ * specified marking is dequeued from a WQ in the
++ * channel selected
++ *
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPDBG object
++ * @dpcon_id: The requested DPCON ID
++ * @trace_point: Trace points configuration
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpdbg_set_dpcon_trace(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ int dpcon_id,
++ struct dpdbg_dpcon_trace_cfg
++ trace_point[DPDBG_NUM_OF_DPCON_TRACE_POINTS]);
++
++/**
++ * Number of DPSECI trace points
++ */
++#define DPDBG_NUM_OF_DPSECI_TRACE_POINTS 2
++
++/**
++ * struct dpdbg_dpseci_trace_cfg - Configure the behavior of a trace point when
++ * a frame marked with the specified DD code point is
++ * encountered
++ * @marking: The requested debug marking;
++ * 'DPDBG_DISABLE_MARKING' for disable marking
++ * @verbosity: Verbosity level
++ */
++struct dpdbg_dpseci_trace_cfg {
++ uint8_t marking;
++ enum dpdbg_verbosity_level verbosity;
++};
++
++/**
++ * dpdbg_set_dpseci_trace() - Set trace for DPSECI when a frame marked with the
++ * specified marking is enqueued via this portal.
++ *
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPDBG object
++ * @dpseci_id: The requested DPSECI ID
++ * @trace_point: Trace points configuration
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpdbg_set_dpseci_trace(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ int dpseci_id,
++ struct dpdbg_dpseci_trace_cfg
++ trace_point[DPDBG_NUM_OF_DPSECI_TRACE_POINTS]);
++
++/**
++ * dpdbg_get_dpmac_counter() - DPMAC packet throughput
++ *
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPDBG object
++ * @dpmac_id: The requested DPMAC ID
++ * @counter_type: The requested DPMAC counter
++ * @counter: Returned counter value
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpdbg_get_dpmac_counter(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ int dpmac_id,
++ enum dpmac_counter counter_type,
++ uint64_t *counter);
++
++/**
++ * dpdbg_get_dpni_counter() - DPNI packet throughput
++ *
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPDBG object
++ * @dpni_id: The requested DPNI ID
++ * @counter_type: The requested DPNI counter
++ * @counter: Returned counter value
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpdbg_get_dpni_counter(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ int dpni_id,
++ enum dpni_counter counter_type,
++ uint64_t *counter);
++
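++/*
++ * Illustrative sketch only: snapshot one DPMAC counter and one DPNI counter
++ * in a single helper, e.g. to compare what the MAC received with what the
++ * network interface processed.  The counter selectors come from fsl_dpmac.h
++ * and fsl_dpni.h and are supplied by the caller, so no enumerator names are
++ * assumed here.
++ */
++static inline int dpdbg_example_snapshot(struct fsl_mc_io *mc_io,
++					 uint16_t token,
++					 int dpmac_id,
++					 enum dpmac_counter mac_cnt,
++					 int dpni_id,
++					 enum dpni_counter ni_cnt,
++					 uint64_t *mac_val,
++					 uint64_t *ni_val)
++{
++	int err;
++
++	err = dpdbg_get_dpmac_counter(mc_io, 0, token, dpmac_id, mac_cnt,
++				      mac_val);
++	if (err)
++		return err;
++	return dpdbg_get_dpni_counter(mc_io, 0, token, dpni_id, ni_cnt,
++				      ni_val);
++}
++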
++#endif /* __FSL_DPDBG_H */
+diff --git a/drivers/net/dpaa2/mc/fsl_dpdbg_cmd.h b/drivers/net/dpaa2/mc/fsl_dpdbg_cmd.h
+new file mode 100644
+index 0000000..b672788
+--- /dev/null
++++ b/drivers/net/dpaa2/mc/fsl_dpdbg_cmd.h
+@@ -0,0 +1,249 @@
++/* Copyright 2013-2015 Freescale Semiconductor Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of the above-listed copyright holders nor the
++ * names of any contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
++ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
++ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
++ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
++ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
++ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
++ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
++ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
++ * POSSIBILITY OF SUCH DAMAGE.
++ */
++#ifndef _FSL_DPDBG_CMD_H
++#define _FSL_DPDBG_CMD_H
++
++/* DPDBG Version */
++#define DPDBG_VER_MAJOR 1
++#define DPDBG_VER_MINOR 0
++
++/* Command IDs */
++#define DPDBG_CMDID_CLOSE 0x800
++#define DPDBG_CMDID_OPEN 0x80F
++
++#define DPDBG_CMDID_GET_ATTR 0x004
++
++#define DPDBG_CMDID_GET_DPNI_INFO 0x130
++#define DPDBG_CMDID_GET_DPNI_PRIV_TX_CONF_FQID 0x131
++#define DPDBG_CMDID_GET_DPCON_INFO 0x132
++#define DPDBG_CMDID_GET_DPBP_INFO 0x133
++#define DPDBG_CMDID_GET_DPCI_FQID 0x134
++
++#define DPDBG_CMDID_SET_CTLU_GLOBAL_MARKING 0x135
++#define DPDBG_CMDID_SET_DPNI_RX_MARKING 0x136
++#define DPDBG_CMDID_SET_DPNI_TX_CONF_MARKING 0x137
++#define DPDBG_CMDID_SET_DPIO_MARKING 0x138
++
++#define DPDBG_CMDID_SET_CTLU_GLOBAL_TRACE 0x140
++#define DPDBG_CMDID_SET_DPIO_TRACE 0x141
++#define DPDBG_CMDID_SET_DPNI_RX_TRACE 0x142
++#define DPDBG_CMDID_SET_DPNI_TX_TRACE 0x143
++#define DPDBG_CMDID_SET_DPCON_TRACE 0x145
++#define DPDBG_CMDID_SET_DPSECI_TRACE 0x146
++
++#define DPDBG_CMDID_GET_DPMAC_COUNTER 0x150
++#define DPDBG_CMDID_GET_DPNI_COUNTER 0x151
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPDBG_CMD_OPEN(cmd, dpdbg_id) \
++ MC_CMD_OP(cmd, 0, 0, 32, int, dpdbg_id)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPDBG_RSP_GET_ATTRIBUTES(cmd, attr) \
++do { \
++ MC_RSP_OP(cmd, 0, 32, 32, int, attr->id);\
++ MC_RSP_OP(cmd, 1, 0, 16, uint16_t, attr->version.major);\
++ MC_RSP_OP(cmd, 1, 16, 16, uint16_t, attr->version.minor);\
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPDBG_CMD_GET_DPNI_INFO(cmd, dpni_id) \
++ MC_CMD_OP(cmd, 0, 0, 32, int, dpni_id)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPDBG_RSP_GET_DPNI_INFO(cmd, info) \
++do { \
++ MC_RSP_OP(cmd, 1, 0, 32, uint32_t, info->qdid);\
++ MC_RSP_OP(cmd, 1, 32, 8, uint8_t, info->max_senders);\
++ MC_RSP_OP(cmd, 2, 0, 32, uint32_t, info->err_fqid);\
++ MC_RSP_OP(cmd, 2, 32, 32, uint32_t, info->tx_conf_fqid);\
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPDBG_CMD_GET_DPNI_PRIV_TX_CONF_FQID(cmd, dpni_id, sender_id) \
++do { \
++ MC_CMD_OP(cmd, 0, 0, 32, int, dpni_id);\
++ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, sender_id);\
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPDBG_RSP_GET_DPNI_PRIV_TX_CONF_FQID(cmd, fqid) \
++ MC_RSP_OP(cmd, 1, 0, 32, uint32_t, fqid)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPDBG_CMD_GET_DPCON_INFO(cmd, dpcon_id) \
++ MC_CMD_OP(cmd, 0, 0, 32, int, dpcon_id)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPDBG_RSP_GET_DPCON_INFO(cmd, info) \
++ MC_RSP_OP(cmd, 1, 0, 16, uint16_t, info->ch_id)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPDBG_CMD_GET_DPBP_INFO(cmd, dpbp_id) \
++ MC_CMD_OP(cmd, 0, 0, 32, int, dpbp_id)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPDBG_RSP_GET_DPBP_INFO(cmd, info) \
++ MC_RSP_OP(cmd, 1, 0, 16, uint16_t, info->bpid)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPDBG_CMD_GET_DPCI_FQID(cmd, dpci_id, priority) \
++do { \
++ MC_CMD_OP(cmd, 0, 0, 32, int, dpci_id);\
++ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, priority);\
++} while (0)
++/* cmd, param, offset, width, type, arg_name */
++#define DPDBG_RSP_GET_DPCI_FQID(cmd, fqid) \
++ MC_RSP_OP(cmd, 1, 0, 32, uint32_t, fqid)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPDBG_CMD_SET_CTLU_GLOBAL_MARKING(cmd, marking, cfg) \
++do { \
++ MC_CMD_OP(cmd, 0, 0, 8, uint8_t, marking);\
++ MC_CMD_OP(cmd, 0, 8, 8, uint8_t, cfg->key_size); \
++ MC_CMD_OP(cmd, 1, 0, 64, uint64_t, cfg->key_iova); \
++ MC_CMD_OP(cmd, 2, 0, 64, uint64_t, cfg->mask_iova); \
++ MC_CMD_OP(cmd, 3, 0, 64, uint64_t, cfg->rule_iova); \
++} while (0)
++
++#define DPDBG_CMD_SET_DPNI_RX_MARKING(cmd, dpni_id, cfg) \
++do { \
++ MC_CMD_OP(cmd, 0, 0, 32, int, dpni_id);\
++ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, cfg->tc_id);\
++ MC_CMD_OP(cmd, 0, 48, 16, uint16_t, cfg->flow_id);\
++ MC_CMD_OP(cmd, 1, 0, 16, uint16_t, cfg->dpbp_id);\
++ MC_CMD_OP(cmd, 1, 16, 8, uint8_t, cfg->marking);\
++} while (0)
++
++#define DPDBG_CMD_SET_DPNI_TX_CONF_MARKING(cmd, dpni_id, sender_id, marking) \
++do { \
++ MC_CMD_OP(cmd, 0, 0, 32, int, dpni_id);\
++ MC_CMD_OP(cmd, 0, 48, 16, uint16_t, sender_id);\
++ MC_CMD_OP(cmd, 1, 16, 8, uint8_t, marking);\
++} while (0)
++
++#define DPDBG_CMD_SET_DPIO_MARKING(cmd, dpio_id, marking) \
++do { \
++ MC_CMD_OP(cmd, 0, 0, 32, int, dpio_id);\
++ MC_CMD_OP(cmd, 1, 16, 8, uint8_t, marking);\
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPDBG_CMD_SET_CTLU_GLOBAL_TRACE(cmd, cfg) \
++do { \
++ MC_CMD_OP(cmd, 0, 8, 8, uint8_t, cfg->key_size); \
++ MC_CMD_OP(cmd, 1, 0, 64, uint64_t, cfg->key_iova); \
++ MC_CMD_OP(cmd, 2, 0, 64, uint64_t, cfg->mask_iova); \
++ MC_CMD_OP(cmd, 3, 0, 64, uint64_t, cfg->rule_iova); \
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPDBG_CMD_SET_DPIO_TRACE(cmd, dpio_id, trace_point) \
++do { \
++ MC_CMD_OP(cmd, 0, 0, 32, int, dpio_id);\
++ MC_CMD_OP(cmd, 1, 0, 4, enum dpdbg_verbosity_level, \
++ trace_point[0].verbosity); \
++ MC_CMD_OP(cmd, 1, 4, 4, enum dpdbg_dpio_trace_type, \
++ trace_point[0].enqueue_type); \
++ MC_CMD_OP(cmd, 1, 8, 8, uint8_t, trace_point[0].marking); \
++ MC_CMD_OP(cmd, 1, 32, 4, enum dpdbg_verbosity_level, \
++ trace_point[1].verbosity); \
++ MC_CMD_OP(cmd, 1, 36, 4, enum dpdbg_dpio_trace_type, \
++ trace_point[1].enqueue_type); \
++ MC_CMD_OP(cmd, 1, 40, 8, uint8_t, trace_point[1].marking); \
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPDBG_CMD_SET_DPNI_RX_TRACE(cmd, dpni_id, trace_cfg) \
++do { \
++ MC_CMD_OP(cmd, 0, 0, 32, int, dpni_id);\
++ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, trace_cfg->tc_id);\
++ MC_CMD_OP(cmd, 0, 48, 16, uint16_t, trace_cfg->flow_id);\
++ MC_CMD_OP(cmd, 1, 0, 16, uint16_t, trace_cfg->dpbp_id);\
++ MC_CMD_OP(cmd, 1, 16, 8, uint8_t, trace_cfg->marking);\
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPDBG_CMD_SET_DPNI_TX_TRACE(cmd, dpni_id, sender_id, trace_cfg) \
++do { \
++ MC_CMD_OP(cmd, 0, 0, 32, int, dpni_id);\
++ MC_CMD_OP(cmd, 0, 48, 16, uint16_t, sender_id);\
++ MC_CMD_OP(cmd, 1, 16, 8, uint8_t, trace_cfg->marking);\
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPDBG_CMD_SET_DPCON_TRACE(cmd, dpcon_id, trace_point) \
++do { \
++ MC_CMD_OP(cmd, 0, 0, 32, int, dpcon_id);\
++ MC_CMD_OP(cmd, 1, 0, 4, enum dpdbg_verbosity_level, \
++ trace_point[0].verbosity); \
++ MC_CMD_OP(cmd, 1, 8, 8, uint8_t, trace_point[0].marking); \
++ MC_CMD_OP(cmd, 1, 32, 4, enum dpdbg_verbosity_level, \
++ trace_point[1].verbosity); \
++ MC_CMD_OP(cmd, 1, 40, 8, uint8_t, trace_point[1].marking); \
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPDBG_CMD_SET_DPSECI_TRACE(cmd, dpseci_id, trace_point) \
++do { \
++ MC_CMD_OP(cmd, 0, 0, 32, int, dpseci_id);\
++ MC_CMD_OP(cmd, 1, 0, 4, enum dpdbg_verbosity_level, \
++ trace_point[0].verbosity); \
++ MC_CMD_OP(cmd, 1, 8, 8, uint8_t, trace_point[0].marking); \
++ MC_CMD_OP(cmd, 1, 32, 4, enum dpdbg_verbosity_level, \
++ trace_point[1].verbosity); \
++ MC_CMD_OP(cmd, 1, 40, 8, uint8_t, trace_point[1].marking); \
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPDBG_CMD_GET_DPMAC_COUNTER(cmd, dpmac_id, counter_type) \
++do { \
++ MC_CMD_OP(cmd, 0, 0, 32, int, dpmac_id);\
++ MC_CMD_OP(cmd, 0, 32, 16, enum dpmac_counter, counter_type);\
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPDBG_RSP_GET_DPMAC_COUNTER(cmd, counter) \
++ MC_RSP_OP(cmd, 1, 0, 64, uint64_t, counter)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPDBG_CMD_GET_DPNI_COUNTER(cmd, dpni_id, counter_type) \
++do { \
++ MC_CMD_OP(cmd, 0, 0, 32, int, dpni_id);\
++ MC_CMD_OP(cmd, 0, 32, 16, enum dpni_counter, counter_type);\
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPDBG_RSP_GET_DPNI_COUNTER(cmd, counter) \
++ MC_RSP_OP(cmd, 1, 0, 64, uint64_t, counter)
++
++#endif /* _FSL_DPDBG_CMD_H */
+diff --git a/drivers/net/dpaa2/mc/fsl_dpdcei.h b/drivers/net/dpaa2/mc/fsl_dpdcei.h
+new file mode 100644
+index 0000000..319795c
+--- /dev/null
++++ b/drivers/net/dpaa2/mc/fsl_dpdcei.h
+@@ -0,0 +1,515 @@
++/* Copyright 2013-2015 Freescale Semiconductor Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of the above-listed copyright holders nor the
++ * names of any contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
++ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
++ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
++ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
++ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
++ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
++ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
++ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
++ * POSSIBILITY OF SUCH DAMAGE.
++ */
++#ifndef __FSL_DPDCEI_H
++#define __FSL_DPDCEI_H
++
++/* Data Path DCE Interface API
++ * Contains initialization APIs and runtime control APIs for DPDCEI
++ */
++
++struct fsl_mc_io;
++
++/** General DPDCEI macros */
++
++/**
++ * Indicates an invalid frame queue
++ */
++#define DPDCEI_FQID_NOT_VALID (uint32_t)(-1)
++
++/**
++ * enum dpdcei_engine - DCE engine block
++ * @DPDCEI_ENGINE_COMPRESSION: Engine compression
++ * @DPDCEI_ENGINE_DECOMPRESSION: Engine decompression
++ */
++enum dpdcei_engine {
++ DPDCEI_ENGINE_COMPRESSION,
++ DPDCEI_ENGINE_DECOMPRESSION
++};
++
++/**
++ * dpdcei_open() - Open a control session for the specified object
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @dpdcei_id: DPDCEI unique ID
++ * @token: Returned token; use in subsequent API calls
++ *
++ * This function can be used to open a control session for an
++ * already created object; an object may have been declared in
++ * the DPL or by calling the dpdcei_create() function.
++ * This function returns a unique authentication token,
++ * associated with the specific object ID and the specific MC
++ * portal; this token must be used in all subsequent commands for
++ * this specific object.
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpdcei_open(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ int dpdcei_id,
++ uint16_t *token);
++
++/**
++ * dpdcei_close() - Close the control session of the object
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPDCEI object
++ *
++ * After this function is called, no further operations are
++ * allowed on the object without opening a new control session.
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpdcei_close(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token);
++
++/**
++ * struct dpdcei_cfg - Structure representing DPDCEI configuration
++ * @engine: compression or decompression engine to be selected
++ * @priority: Priority for the DCE hardware processing (valid values 1-8).
++ */
++struct dpdcei_cfg {
++ enum dpdcei_engine engine;
++ uint8_t priority;
++};
++
++/**
++ * dpdcei_create() - Create the DPDCEI object
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @cfg: Configuration parameters
++ * @token: Returned token; use in subsequent API calls
++ *
++ * Create the DPDCEI object, allocate required resources and
++ * perform required initialization.
++ *
++ * The object can be created either by declaring it in the
++ * DPL file, or by calling this function.
++ *
++ * This function returns a unique authentication token,
++ * associated with the specific object ID and the specific MC
++ * portal; this token must be used in all subsequent calls to
++ * this specific object. For objects that are created using the
++ * DPL file, call dpdcei_open() function to get an authentication
++ * token first.
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpdcei_create(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ const struct dpdcei_cfg *cfg,
++ uint16_t *token);
++
++/**
++ * dpdcei_destroy() - Destroy the DPDCEI object and release all its resources.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPDCEI object
++ *
++ * Return: '0' on Success; error code otherwise.
++ */
++int dpdcei_destroy(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token);
++
++/**
++ * dpdcei_enable() - Enable the DPDCEI, allow sending and receiving frames.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPDCEI object
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpdcei_enable(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token);
++
++/**
++ * dpdcei_disable() - Disable the DPDCEI, stop sending and receiving frames.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPDCEI object
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpdcei_disable(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token);
++
++/**
++ * dpdcei_is_enabled() - Check if the DPDCEI is enabled.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPDCEI object
++ * @en: Returns '1' if the object is enabled; '0' otherwise
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpdcei_is_enabled(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ int *en);
++
++/**
++ * dpdcei_reset() - Reset the DPDCEI, returns the object to initial state.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPDCEI object
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpdcei_reset(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token);
++
++/**
++ * struct dpdcei_irq_cfg - IRQ configuration
++ * @addr: Address that must be written to signal a message-based interrupt
++ * @val: Value to write into irq_addr address
++ * @irq_num: A user defined number associated with this IRQ
++ */
++struct dpdcei_irq_cfg {
++ uint64_t addr;
++ uint32_t val;
++ int irq_num;
++};
++
++/**
++ * dpdcei_set_irq() - Set IRQ information for the DPDCEI to trigger an interrupt
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPDCEI object
++ * @irq_index: Identifies the interrupt index to configure
++ * @irq_cfg: IRQ configuration
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpdcei_set_irq(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ struct dpdcei_irq_cfg *irq_cfg);
++
++/**
++ * dpdcei_get_irq() - Get IRQ information from the DPDCEI
++ *
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPDCEI object
++ * @irq_index: The interrupt index to configure
++ * @type: Interrupt type: 0 represents message interrupt
++ * type (both irq_addr and irq_val are valid)
++ * @irq_cfg: IRQ attributes
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpdcei_get_irq(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ int *type,
++ struct dpdcei_irq_cfg *irq_cfg);
++
++/**
++ * dpdcei_set_irq_enable() - Set overall interrupt state.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPDCEI object
++ * @irq_index: The interrupt index to configure
++ * @en: Interrupt state - enable = 1, disable = 0
++ *
++ * Allows GPP software to control when interrupts are generated.
++ * Each interrupt can have up to 32 causes. The enable/disable controls the
++ * overall interrupt state; if the interrupt is disabled, none of its causes
++ * can assert the interrupt.
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpdcei_set_irq_enable(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ uint8_t en);
++
++/**
++ * dpdcei_get_irq_enable() - Get overall interrupt state
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPDCEI object
++ * @irq_index: The interrupt index to configure
++ * @en: Returned Interrupt state - enable = 1, disable = 0
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpdcei_get_irq_enable(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ uint8_t *en);
++
++/**
++ * dpdcei_set_irq_mask() - Set interrupt mask.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPDCEI object
++ * @irq_index: The interrupt index to configure
++ * @mask: event mask to trigger interrupt;
++ * each bit:
++ * 0 = ignore event
++ * 1 = consider event for asserting IRQ
++ *
++ * Every interrupt can have up to 32 causes and the interrupt model supports
++ * masking/unmasking each cause independently
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpdcei_set_irq_mask(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ uint32_t mask);
++
++/**
++ * dpdcei_get_irq_mask() - Get interrupt mask.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPDCEI object
++ * @irq_index: The interrupt index to configure
++ * @mask: Returned event mask to trigger interrupt
++ *
++ * Every interrupt can have up to 32 causes and the interrupt model supports
++ * masking/unmasking each cause independently
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpdcei_get_irq_mask(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ uint32_t *mask);
++
++/**
++ * dpdcei_get_irq_status() - Get the current status of any pending interrupts
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPDCEI object
++ * @irq_index: The interrupt index to configure
++ * @status: Returned interrupts status - one bit per cause:
++ * 0 = no interrupt pending
++ * 1 = interrupt pending
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpdcei_get_irq_status(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ uint32_t *status);
++
++/**
++ * dpdcei_clear_irq_status() - Clear a pending interrupt's status
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPDCEI object
++ * @irq_index: The interrupt index to configure
++ * @status: bits to clear (W1C) - one bit per cause:
++ * 0 = don't change
++ * 1 = clear status bit
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpdcei_clear_irq_status(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ uint32_t status);
++
++/**
++ * struct dpdcei_attr - Structure representing DPDCEI attributes
++ * @id: DPDCEI object ID
++ * @engine: DCE engine block
++ * @version: DPDCEI version
++ */
++struct dpdcei_attr {
++ int id;
++ enum dpdcei_engine engine;
++ /**
++ * struct version - DPDCEI version
++ * @major: DPDCEI major version
++ * @minor: DPDCEI minor version
++ */
++ struct {
++ uint16_t major;
++ uint16_t minor;
++ } version;
++};
++
++/**
++ * dpdcei_get_attributes() - Retrieve DPDCEI attributes.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPDCEI object
++ * @attr: Returned object's attributes
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpdcei_get_attributes(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ struct dpdcei_attr *attr);
++
++/**
++ * enum dpdcei_dest - DPDCEI destination types
++ * @DPDCEI_DEST_NONE: Unassigned destination; The queue is set in parked mode
++ * and does not generate FQDAN notifications;
++ * user is expected to dequeue from the queue based on
++ * polling or other user-defined method
++ * @DPDCEI_DEST_DPIO: The queue is set in schedule mode and generates FQDAN
++ * notifications to the specified DPIO; user is expected to
++ * dequeue from the queue only after notification is
++ * received
++ * @DPDCEI_DEST_DPCON: The queue is set in schedule mode and does not generate
++ * FQDAN notifications, but is connected to the specified
++ * DPCON object;
++ * user is expected to dequeue from the DPCON channel
++ */
++enum dpdcei_dest {
++ DPDCEI_DEST_NONE = 0,
++ DPDCEI_DEST_DPIO = 1,
++ DPDCEI_DEST_DPCON = 2
++};
++
++/**
++ * struct dpdcei_dest_cfg - Structure representing DPDCEI destination parameters
++ * @dest_type: Destination type
++ * @dest_id: Either DPIO ID or DPCON ID, depending on the destination type
++ * @priority: Priority selection within the DPIO or DPCON channel; valid values
++ * are 0-1 or 0-7, depending on the number of priorities in that
++ * channel; not relevant for 'DPDCEI_DEST_NONE' option
++ */
++struct dpdcei_dest_cfg {
++ enum dpdcei_dest dest_type;
++ int dest_id;
++ uint8_t priority;
++};
++
++/** DPDCEI queue modification options */
++
++/**
++ * Select to modify the user's context associated with the queue
++ */
++#define DPDCEI_QUEUE_OPT_USER_CTX 0x00000001
++
++/**
++ * Select to modify the queue's destination
++ */
++#define DPDCEI_QUEUE_OPT_DEST 0x00000002
++
++/**
++ * struct dpdcei_rx_queue_cfg - RX queue configuration
++ * @options: Flags representing the suggested modifications to the queue;
++ * Use any combination of 'DPDCEI_QUEUE_OPT_<X>' flags
++ * @user_ctx: User context value provided in the frame descriptor of each
++ * dequeued frame;
++ * valid only if 'DPDCEI_QUEUE_OPT_USER_CTX' is contained in 'options'
++ * @dest_cfg: Queue destination parameters;
++ * valid only if 'DPDCEI_QUEUE_OPT_DEST' is contained in 'options'
++ */
++struct dpdcei_rx_queue_cfg {
++ uint32_t options;
++ uint64_t user_ctx;
++ struct dpdcei_dest_cfg dest_cfg;
++};
++
++/**
++ * dpdcei_set_rx_queue() - Set Rx queue configuration
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPDCEI object
++ * @cfg: Rx queue configuration
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpdcei_set_rx_queue(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ const struct dpdcei_rx_queue_cfg *cfg);
++
++/**
++ * struct dpdcei_rx_queue_attr - Structure representing attributes of Rx queues
++ * @user_ctx: User context value provided in the frame descriptor of each
++ * dequeued frame
++ * @dest_cfg: Queue destination configuration
++ * @fqid: Virtual FQID value to be used for dequeue operations
++ */
++struct dpdcei_rx_queue_attr {
++ uint64_t user_ctx;
++ struct dpdcei_dest_cfg dest_cfg;
++ uint32_t fqid;
++};
++
++/**
++ * dpdcei_get_rx_queue() - Retrieve Rx queue attributes.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPDCEI object
++ * @attr: Returned Rx queue attributes
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpdcei_get_rx_queue(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ struct dpdcei_rx_queue_attr *attr);
++
++/**
++ * struct dpdcei_tx_queue_attr - Structure representing attributes of Tx queues
++ * @fqid: Virtual FQID to be used for sending frames to DCE hardware
++ */
++struct dpdcei_tx_queue_attr {
++ uint32_t fqid;
++};
++
++/**
++ * dpdcei_get_tx_queue() - Retrieve Tx queue attributes.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPDCEI object
++ * @attr: Returned Tx queue attributes
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpdcei_get_tx_queue(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ struct dpdcei_tx_queue_attr *attr);
++
++#endif /* __FSL_DPDCEI_H */
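
Taken together, the declarations above imply a small control flow for bringing a DPDCEI object into service. The following is an illustrative sketch only, not code carried by this patch: the mc_io portal is assumed to be initialized elsewhere, the DPDCEI object is assumed to already exist (e.g. declared in the DPL), and all IDs are placeholders.

/* Attach to an existing DPDCEI object, steer its Rx queue to a DPIO,
 * read back the virtual FQID and enable the object.
 */
static int example_dpdcei_setup(struct fsl_mc_io *mc_io, int dpdcei_id,
                                int dpio_id, uint32_t *rx_fqid)
{
    struct dpdcei_rx_queue_cfg rx_cfg = { 0 };
    struct dpdcei_rx_queue_attr rx_attr = { 0 };
    uint16_t token;
    int err;

    err = dpdcei_open(mc_io, 0, dpdcei_id, &token);
    if (err)
        return err;

    /* Generate FQDAN notifications towards the given DPIO for each frame */
    rx_cfg.options = DPDCEI_QUEUE_OPT_DEST | DPDCEI_QUEUE_OPT_USER_CTX;
    rx_cfg.user_ctx = 0x1;                /* opaque value echoed per frame */
    rx_cfg.dest_cfg.dest_type = DPDCEI_DEST_DPIO;
    rx_cfg.dest_cfg.dest_id = dpio_id;
    rx_cfg.dest_cfg.priority = 0;
    err = dpdcei_set_rx_queue(mc_io, 0, token, &rx_cfg);
    if (err)
        goto out;

    /* The virtual FQID returned here is what the data path dequeues from */
    err = dpdcei_get_rx_queue(mc_io, 0, token, &rx_attr);
    if (err)
        goto out;
    *rx_fqid = rx_attr.fqid;

    err = dpdcei_enable(mc_io, 0, token);
out:
    /* Closing the control session does not disable the object itself */
    dpdcei_close(mc_io, 0, token);
    return err;
}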
+diff --git a/drivers/net/dpaa2/mc/fsl_dpdcei_cmd.h b/drivers/net/dpaa2/mc/fsl_dpdcei_cmd.h
+new file mode 100644
+index 0000000..8452d88
+--- /dev/null
++++ b/drivers/net/dpaa2/mc/fsl_dpdcei_cmd.h
+@@ -0,0 +1,182 @@
++/* Copyright 2013-2015 Freescale Semiconductor Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of the above-listed copyright holders nor the
++ * names of any contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
++ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
++ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
++ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
++ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
++ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
++ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
++ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
++ * POSSIBILITY OF SUCH DAMAGE.
++ */
++#ifndef _FSL_DPDCEI_CMD_H
++#define _FSL_DPDCEI_CMD_H
++
++/* DPDCEI Version */
++#define DPDCEI_VER_MAJOR 1
++#define DPDCEI_VER_MINOR 2
++
++/* Command IDs */
++#define DPDCEI_CMDID_CLOSE 0x800
++#define DPDCEI_CMDID_OPEN 0x80D
++#define DPDCEI_CMDID_CREATE 0x90D
++#define DPDCEI_CMDID_DESTROY 0x900
++
++#define DPDCEI_CMDID_ENABLE 0x002
++#define DPDCEI_CMDID_DISABLE 0x003
++#define DPDCEI_CMDID_GET_ATTR 0x004
++#define DPDCEI_CMDID_RESET 0x005
++#define DPDCEI_CMDID_IS_ENABLED 0x006
++
++#define DPDCEI_CMDID_SET_IRQ 0x010
++#define DPDCEI_CMDID_GET_IRQ 0x011
++#define DPDCEI_CMDID_SET_IRQ_ENABLE 0x012
++#define DPDCEI_CMDID_GET_IRQ_ENABLE 0x013
++#define DPDCEI_CMDID_SET_IRQ_MASK 0x014
++#define DPDCEI_CMDID_GET_IRQ_MASK 0x015
++#define DPDCEI_CMDID_GET_IRQ_STATUS 0x016
++#define DPDCEI_CMDID_CLEAR_IRQ_STATUS 0x017
++
++#define DPDCEI_CMDID_SET_RX_QUEUE 0x1B0
++#define DPDCEI_CMDID_GET_RX_QUEUE 0x1B1
++#define DPDCEI_CMDID_GET_TX_QUEUE 0x1B2
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPDCEI_CMD_OPEN(cmd, dpdcei_id) \
++ MC_CMD_OP(cmd, 0, 0, 32, int, dpdcei_id)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPDCEI_CMD_CREATE(cmd, cfg) \
++do { \
++ MC_CMD_OP(cmd, 0, 8, 8, enum dpdcei_engine, cfg->engine);\
++ MC_CMD_OP(cmd, 0, 16, 8, uint8_t, cfg->priority);\
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPDCEI_RSP_IS_ENABLED(cmd, en) \
++ MC_RSP_OP(cmd, 0, 0, 1, int, en)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPDCEI_CMD_SET_IRQ(cmd, irq_index, irq_cfg) \
++do { \
++ MC_CMD_OP(cmd, 0, 0, 8, uint8_t, irq_index);\
++ MC_CMD_OP(cmd, 0, 32, 32, uint32_t, irq_cfg->val);\
++ MC_CMD_OP(cmd, 1, 0, 64, uint64_t, irq_cfg->addr);\
++ MC_CMD_OP(cmd, 2, 0, 32, int, irq_cfg->irq_num); \
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPDCEI_CMD_GET_IRQ(cmd, irq_index) \
++ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPDCEI_RSP_GET_IRQ(cmd, type, irq_cfg) \
++do { \
++ MC_RSP_OP(cmd, 0, 0, 32, uint32_t, irq_cfg->val); \
++ MC_RSP_OP(cmd, 1, 0, 64, uint64_t, irq_cfg->addr);\
++ MC_RSP_OP(cmd, 2, 0, 32, int, irq_cfg->irq_num); \
++ MC_RSP_OP(cmd, 2, 32, 32, int, type); \
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPDCEI_CMD_SET_IRQ_ENABLE(cmd, irq_index, enable_state) \
++do { \
++ MC_CMD_OP(cmd, 0, 0, 8, uint8_t, enable_state); \
++ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index); \
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPDCEI_CMD_GET_IRQ_ENABLE(cmd, irq_index) \
++ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPDCEI_RSP_GET_IRQ_ENABLE(cmd, enable_state) \
++ MC_RSP_OP(cmd, 0, 0, 8, uint8_t, enable_state)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPDCEI_CMD_SET_IRQ_MASK(cmd, irq_index, mask) \
++do { \
++ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, mask); \
++ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index); \
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPDCEI_CMD_GET_IRQ_MASK(cmd, irq_index) \
++ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPDCEI_RSP_GET_IRQ_MASK(cmd, mask) \
++ MC_RSP_OP(cmd, 0, 0, 32, uint32_t, mask)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPDCEI_CMD_GET_IRQ_STATUS(cmd, irq_index, status) \
++do { \
++ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, status);\
++ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPDCEI_RSP_GET_IRQ_STATUS(cmd, status) \
++ MC_RSP_OP(cmd, 0, 0, 32, uint32_t, status)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPDCEI_CMD_CLEAR_IRQ_STATUS(cmd, irq_index, status) \
++do { \
++ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, status); \
++ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index); \
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPDCEI_RSP_GET_ATTR(cmd, attr) \
++do { \
++ MC_RSP_OP(cmd, 0, 0, 32, int, attr->id); \
++ MC_RSP_OP(cmd, 0, 32, 8, enum dpdcei_engine, attr->engine); \
++ MC_RSP_OP(cmd, 1, 0, 16, uint16_t, attr->version.major);\
++ MC_RSP_OP(cmd, 1, 16, 16, uint16_t, attr->version.minor);\
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPDCEI_CMD_SET_RX_QUEUE(cmd, cfg) \
++do { \
++ MC_CMD_OP(cmd, 0, 0, 32, int, cfg->dest_cfg.dest_id); \
++ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, cfg->dest_cfg.priority); \
++ MC_CMD_OP(cmd, 0, 48, 4, enum dpdcei_dest, cfg->dest_cfg.dest_type); \
++ MC_CMD_OP(cmd, 1, 0, 64, uint64_t, cfg->user_ctx); \
++ MC_CMD_OP(cmd, 2, 0, 32, uint32_t, cfg->options);\
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPDCEI_RSP_GET_RX_QUEUE(cmd, attr) \
++do { \
++ MC_RSP_OP(cmd, 0, 0, 32, int, attr->dest_cfg.dest_id);\
++ MC_RSP_OP(cmd, 0, 32, 8, uint8_t, attr->dest_cfg.priority);\
++ MC_RSP_OP(cmd, 0, 48, 4, enum dpdcei_dest, attr->dest_cfg.dest_type);\
++ MC_RSP_OP(cmd, 1, 0, 64, uint64_t, attr->user_ctx);\
++ MC_RSP_OP(cmd, 2, 0, 32, uint32_t, attr->fqid);\
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPDCEI_RSP_GET_TX_QUEUE(cmd, attr) \
++ MC_RSP_OP(cmd, 0, 32, 32, uint32_t, attr->fqid)
++
++#endif /* _FSL_DPDCEI_CMD_H */
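
These encode/decode macros are consumed by thin wrappers over the MC command portal (the patch also adds drivers/net/dpaa2/mc/dpdcei.c for that purpose). The sketch below shows the presumed shape of one such wrapper, assuming the usual fsl_mc_cmd.h helpers (struct mc_command, mc_encode_cmd_header() and mc_send_command()); it is illustrative, not a copy of the patched file.

int dpdcei_get_rx_queue(struct fsl_mc_io *mc_io,
                        uint32_t cmd_flags,
                        uint16_t token,
                        struct dpdcei_rx_queue_attr *attr)
{
    struct mc_command cmd = { 0 };
    int err;

    /* Prepare the command header with the command ID, flags and token */
    cmd.header = mc_encode_cmd_header(DPDCEI_CMDID_GET_RX_QUEUE,
                                      cmd_flags, token);

    /* Send the command to the MC firmware */
    err = mc_send_command(mc_io, &cmd);
    if (err)
        return err;

    /* Unpack the response words into the caller's attribute structure */
    DPDCEI_RSP_GET_RX_QUEUE(cmd, attr);

    return 0;
}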
+diff --git a/drivers/net/dpaa2/mc/fsl_dpdmai.h b/drivers/net/dpaa2/mc/fsl_dpdmai.h
+new file mode 100644
+index 0000000..e931ce1
+--- /dev/null
++++ b/drivers/net/dpaa2/mc/fsl_dpdmai.h
+@@ -0,0 +1,521 @@
++/* Copyright 2013-2015 Freescale Semiconductor Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of the above-listed copyright holders nor the
++ * names of any contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
++ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
++ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
++ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
++ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
++ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
++ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
++ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
++ * POSSIBILITY OF SUCH DAMAGE.
++ */
++#ifndef __FSL_DPDMAI_H
++#define __FSL_DPDMAI_H
++
++struct fsl_mc_io;
++
++/* Data Path DMA Interface API
++ * Contains initialization APIs and runtime control APIs for DPDMAI
++ */
++
++/* General DPDMAI macros */
++
++/**
++ * Maximum number of Tx/Rx priorities per DPDMAI object
++ */
++#define DPDMAI_PRIO_NUM 2
++
++/**
++ * All queues considered; see dpdmai_set_rx_queue()
++ */
++#define DPDMAI_ALL_QUEUES (uint8_t)(-1)
++
++/**
++ * dpdmai_open() - Open a control session for the specified object
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @dpdmai_id: DPDMAI unique ID
++ * @token: Returned token; use in subsequent API calls
++ *
++ * This function can be used to open a control session for an
++ * already created object; an object may have been declared in
++ * the DPL or by calling the dpdmai_create() function.
++ * This function returns a unique authentication token,
++ * associated with the specific object ID and the specific MC
++ * portal; this token must be used in all subsequent commands for
++ * this specific object.
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpdmai_open(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ int dpdmai_id,
++ uint16_t *token);
++
++/**
++ * dpdmai_close() - Close the control session of the object
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPDMAI object
++ *
++ * After this function is called, no further operations are
++ * allowed on the object without opening a new control session.
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpdmai_close(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token);
++
++/**
++ * struct dpdmai_cfg - Structure representing DPDMAI configuration
++ * @priorities: Priorities for the DMA hardware processing; valid priorities are
++ * configured with values 1-8; the entry following last valid entry
++ * should be configured with 0
++ */
++struct dpdmai_cfg {
++ uint8_t priorities[DPDMAI_PRIO_NUM];
++};
++
++/**
++ * dpdmai_create() - Create the DPDMAI object
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @cfg: Configuration structure
++ * @token: Returned token; use in subsequent API calls
++ *
++ * Create the DPDMAI object, allocate required resources and
++ * perform required initialization.
++ *
++ * The object can be created either by declaring it in the
++ * DPL file, or by calling this function.
++ *
++ * This function returns a unique authentication token,
++ * associated with the specific object ID and the specific MC
++ * portal; this token must be used in all subsequent calls to
++ * this specific object. For objects that are created using the
++ * DPL file, call dpdmai_open() function to get an authentication
++ * token first.
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpdmai_create(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ const struct dpdmai_cfg *cfg,
++ uint16_t *token);
++
++/**
++ * dpdmai_destroy() - Destroy the DPDMAI object and release all its resources.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPDMAI object
++ *
++ * Return: '0' on Success; error code otherwise.
++ */
++int dpdmai_destroy(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token);
++
++/**
++ * dpdmai_enable() - Enable the DPDMAI, allow sending and receiving frames.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPDMAI object
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpdmai_enable(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token);
++
++/**
++ * dpdmai_disable() - Disable the DPDMAI, stop sending and receiving frames.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPDMAI object
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpdmai_disable(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token);
++
++/**
++ * dpdmai_is_enabled() - Check if the DPDMAI is enabled.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPDMAI object
++ * @en: Returns '1' if object is enabled; '0' otherwise
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpdmai_is_enabled(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ int *en);
++
++/**
++ * dpdmai_reset() - Reset the DPDMAI, returns the object to initial state.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPDMAI object
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpdmai_reset(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token);
++
++/**
++ * struct dpdmai_irq_cfg - IRQ configuration
++ * @addr: Address that must be written to signal a message-based interrupt
++ * @val: Value to write into irq_addr address
++ * @irq_num: A user defined number associated with this IRQ
++ */
++struct dpdmai_irq_cfg {
++ uint64_t addr;
++ uint32_t val;
++ int irq_num;
++};
++
++/**
++ * dpdmai_set_irq() - Set IRQ information for the DPDMAI to trigger an interrupt.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPDMAI object
++ * @irq_index: Identifies the interrupt index to configure
++ * @irq_cfg: IRQ configuration
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpdmai_set_irq(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ struct dpdmai_irq_cfg *irq_cfg);
++
++/**
++ * dpdmai_get_irq() - Get IRQ information from the DPDMAI
++ *
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPDMAI object
++ * @irq_index: The interrupt index to configure
++ * @type: Interrupt type: 0 represents message interrupt
++ * type (both irq_addr and irq_val are valid)
++ * @irq_cfg: IRQ attributes
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpdmai_get_irq(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ int *type,
++ struct dpdmai_irq_cfg *irq_cfg);
++
++/**
++ * dpdmai_set_irq_enable() - Set overall interrupt state.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPDMAI object
++ * @irq_index: The interrupt index to configure
++ * @en: Interrupt state - enable = 1, disable = 0
++ *
++ * Allows GPP software to control when interrupts are generated.
++ * Each interrupt can have up to 32 causes. The enable/disable controls the
++ * overall interrupt state; if the interrupt is disabled, none of its causes
++ * can assert the interrupt.
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpdmai_set_irq_enable(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ uint8_t en);
++
++/**
++ * dpdmai_get_irq_enable() - Get overall interrupt state
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPDMAI object
++ * @irq_index: The interrupt index to configure
++ * @en: Returned Interrupt state - enable = 1, disable = 0
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpdmai_get_irq_enable(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ uint8_t *en);
++
++/**
++ * dpdmai_set_irq_mask() - Set interrupt mask.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPDMAI object
++ * @irq_index: The interrupt index to configure
++ * @mask: event mask to trigger interrupt;
++ * each bit:
++ * 0 = ignore event
++ * 1 = consider event for asserting IRQ
++ *
++ * Every interrupt can have up to 32 causes and the interrupt model supports
++ * masking/unmasking each cause independently
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpdmai_set_irq_mask(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ uint32_t mask);
++
++/**
++ * dpdmai_get_irq_mask() - Get interrupt mask.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPDMAI object
++ * @irq_index: The interrupt index to configure
++ * @mask: Returned event mask to trigger interrupt
++ *
++ * Every interrupt can have up to 32 causes and the interrupt model supports
++ * masking/unmasking each cause independently
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpdmai_get_irq_mask(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ uint32_t *mask);
++
++/**
++ * dpdmai_get_irq_status() - Get the current status of any pending interrupts
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPDMAI object
++ * @irq_index: The interrupt index to configure
++ * @status: Returned interrupts status - one bit per cause:
++ * 0 = no interrupt pending
++ * 1 = interrupt pending
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpdmai_get_irq_status(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ uint32_t *status);
++
++/**
++ * dpdmai_clear_irq_status() - Clear a pending interrupt's status
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPDMAI object
++ * @irq_index: The interrupt index to configure
++ * @status: bits to clear (W1C) - one bit per cause:
++ * 0 = don't change
++ * 1 = clear status bit
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpdmai_clear_irq_status(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ uint32_t status);
++
++/**
++ * struct dpdmai_attr - Structure representing DPDMAI attributes
++ * @id: DPDMAI object ID
++ * @version: DPDMAI version
++ * @num_of_priorities: number of priorities
++ */
++struct dpdmai_attr {
++ int id;
++ /**
++ * struct version - DPDMAI version
++ * @major: DPDMAI major version
++ * @minor: DPDMAI minor version
++ */
++ struct {
++ uint16_t major;
++ uint16_t minor;
++ } version;
++ uint8_t num_of_priorities;
++};
++
++/**
++ * dpdmai_get_attributes() - Retrieve DPDMAI attributes.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPDMAI object
++ * @attr: Returned object's attributes
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpdmai_get_attributes(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ struct dpdmai_attr *attr);
++
++/**
++ * enum dpdmai_dest - DPDMAI destination types
++ * @DPDMAI_DEST_NONE: Unassigned destination; The queue is set in parked mode
++ * and does not generate FQDAN notifications; user is expected to dequeue
++ * from the queue based on polling or other user-defined method
++ * @DPDMAI_DEST_DPIO: The queue is set in schedule mode and generates FQDAN
++ * notifications to the specified DPIO; user is expected to dequeue
++ * from the queue only after notification is received
++ * @DPDMAI_DEST_DPCON: The queue is set in schedule mode and does not generate
++ * FQDAN notifications, but is connected to the specified DPCON object;
++ * user is expected to dequeue from the DPCON channel
++ */
++enum dpdmai_dest {
++ DPDMAI_DEST_NONE = 0,
++ DPDMAI_DEST_DPIO = 1,
++ DPDMAI_DEST_DPCON = 2
++};
++
++/**
++ * struct dpdmai_dest_cfg - Structure representing DPDMAI destination parameters
++ * @dest_type: Destination type
++ * @dest_id: Either DPIO ID or DPCON ID, depending on the destination type
++ * @priority: Priority selection within the DPIO or DPCON channel; valid values
++ * are 0-1 or 0-7, depending on the number of priorities in that
++ * channel; not relevant for 'DPDMAI_DEST_NONE' option
++ */
++struct dpdmai_dest_cfg {
++ enum dpdmai_dest dest_type;
++ int dest_id;
++ uint8_t priority;
++};
++
++/* DPDMAI queue modification options */
++
++/**
++ * Select to modify the user's context associated with the queue
++ */
++#define DPDMAI_QUEUE_OPT_USER_CTX 0x00000001
++
++/**
++ * Select to modify the queue's destination
++ */
++#define DPDMAI_QUEUE_OPT_DEST 0x00000002
++
++/**
++ * struct dpdmai_rx_queue_cfg - DPDMAI RX queue configuration
++ * @options: Flags representing the suggested modifications to the queue;
++ * Use any combination of 'DPDMAI_QUEUE_OPT_<X>' flags
++ * @user_ctx: User context value provided in the frame descriptor of each
++ * dequeued frame;
++ * valid only if 'DPDMAI_QUEUE_OPT_USER_CTX' is contained in 'options'
++ * @dest_cfg: Queue destination parameters;
++ * valid only if 'DPDMAI_QUEUE_OPT_DEST' is contained in 'options'
++ */
++struct dpdmai_rx_queue_cfg {
++ uint32_t options;
++ uint64_t user_ctx;
++ struct dpdmai_dest_cfg dest_cfg;
++};
++
++/**
++ * dpdmai_set_rx_queue() - Set Rx queue configuration
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPDMAI object
++ * @priority: Select the queue relative to number of
++ * priorities configured at DPDMAI creation; use
++ * DPDMAI_ALL_QUEUES to configure all Rx queues
++ * identically.
++ * @cfg: Rx queue configuration
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpdmai_set_rx_queue(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t priority,
++ const struct dpdmai_rx_queue_cfg *cfg);
++
++/**
++ * struct dpdmai_rx_queue_attr - Structure representing attributes of Rx queues
++ * @user_ctx: User context value provided in the frame descriptor of each
++ * dequeued frame
++ * @dest_cfg: Queue destination configuration
++ * @fqid: Virtual FQID value to be used for dequeue operations
++ */
++struct dpdmai_rx_queue_attr {
++ uint64_t user_ctx;
++ struct dpdmai_dest_cfg dest_cfg;
++ uint32_t fqid;
++};
++
++/**
++ * dpdmai_get_rx_queue() - Retrieve Rx queue attributes.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPDMAI object
++ * @priority: Select the queue relative to number of
++ * priorities configured at DPDMAI creation
++ * @attr: Returned Rx queue attributes
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpdmai_get_rx_queue(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t priority,
++ struct dpdmai_rx_queue_attr *attr);
++
++/**
++ * struct dpdmai_tx_queue_attr - Structure representing attributes of Tx queues
++ * @fqid: Virtual FQID to be used for sending frames to DMA hardware
++ */
++
++ uint32_t fqid;
++};
++
++/**
++ * dpdmai_get_tx_queue() - Retrieve Tx queue attributes.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPDMAI object
++ * @priority: Select the queue relative to number of
++ * priorities configured at DPDMAI creation
++ * @attr: Returned Tx queue attributes
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpdmai_get_tx_queue(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t priority,
++ struct dpdmai_tx_queue_attr *attr);
++
++#endif /* __FSL_DPDMAI_H */
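
The DPDMAI flow mirrors the DPDCEI sketch shown earlier, with the added per-priority queue dimension. Again this is only an illustrative sketch: the mc_io portal is assumed to be initialized, and the DPDMAI and DPCON IDs are placeholders.

/* Steer all Rx queues of a DPDMAI object to one DPCON channel, collect
 * the per-priority Tx FQIDs and enable the object.
 */
static int example_dpdmai_setup(struct fsl_mc_io *mc_io, int dpdmai_id,
                                int dpcon_id,
                                uint32_t tx_fqid[DPDMAI_PRIO_NUM])
{
    struct dpdmai_rx_queue_cfg rx_cfg = { 0 };
    struct dpdmai_tx_queue_attr tx_attr;
    struct dpdmai_attr attr;
    uint16_t token;
    uint8_t p;
    int err;

    err = dpdmai_open(mc_io, 0, dpdmai_id, &token);
    if (err)
        return err;

    err = dpdmai_get_attributes(mc_io, 0, token, &attr);
    if (err)
        goto out;

    /* Configure every Rx queue identically: schedule mode into the DPCON */
    rx_cfg.options = DPDMAI_QUEUE_OPT_DEST;
    rx_cfg.dest_cfg.dest_type = DPDMAI_DEST_DPCON;
    rx_cfg.dest_cfg.dest_id = dpcon_id;
    rx_cfg.dest_cfg.priority = 0;
    err = dpdmai_set_rx_queue(mc_io, 0, token, DPDMAI_ALL_QUEUES, &rx_cfg);
    if (err)
        goto out;

    /* One Tx FQID per priority configured at DPDMAI creation */
    for (p = 0; p < attr.num_of_priorities; p++) {
        err = dpdmai_get_tx_queue(mc_io, 0, token, p, &tx_attr);
        if (err)
            goto out;
        tx_fqid[p] = tx_attr.fqid;
    }

    err = dpdmai_enable(mc_io, 0, token);
out:
    dpdmai_close(mc_io, 0, token);
    return err;
}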
+diff --git a/drivers/net/dpaa2/mc/fsl_dpdmai_cmd.h b/drivers/net/dpaa2/mc/fsl_dpdmai_cmd.h
+new file mode 100644
+index 0000000..7c4a31a
+--- /dev/null
++++ b/drivers/net/dpaa2/mc/fsl_dpdmai_cmd.h
+@@ -0,0 +1,191 @@
++/* Copyright 2013-2015 Freescale Semiconductor Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of the above-listed copyright holders nor the
++ * names of any contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
++ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
++ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
++ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
++ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
++ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
++ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
++ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
++ * POSSIBILITY OF SUCH DAMAGE.
++ */
++#ifndef _FSL_DPDMAI_CMD_H
++#define _FSL_DPDMAI_CMD_H
++
++/* DPDMAI Version */
++#define DPDMAI_VER_MAJOR 2
++#define DPDMAI_VER_MINOR 2
++
++/* Command IDs */
++#define DPDMAI_CMDID_CLOSE 0x800
++#define DPDMAI_CMDID_OPEN 0x80E
++#define DPDMAI_CMDID_CREATE 0x90E
++#define DPDMAI_CMDID_DESTROY 0x900
++
++#define DPDMAI_CMDID_ENABLE 0x002
++#define DPDMAI_CMDID_DISABLE 0x003
++#define DPDMAI_CMDID_GET_ATTR 0x004
++#define DPDMAI_CMDID_RESET 0x005
++#define DPDMAI_CMDID_IS_ENABLED 0x006
++
++#define DPDMAI_CMDID_SET_IRQ 0x010
++#define DPDMAI_CMDID_GET_IRQ 0x011
++#define DPDMAI_CMDID_SET_IRQ_ENABLE 0x012
++#define DPDMAI_CMDID_GET_IRQ_ENABLE 0x013
++#define DPDMAI_CMDID_SET_IRQ_MASK 0x014
++#define DPDMAI_CMDID_GET_IRQ_MASK 0x015
++#define DPDMAI_CMDID_GET_IRQ_STATUS 0x016
++#define DPDMAI_CMDID_CLEAR_IRQ_STATUS 0x017
++
++#define DPDMAI_CMDID_SET_RX_QUEUE 0x1A0
++#define DPDMAI_CMDID_GET_RX_QUEUE 0x1A1
++#define DPDMAI_CMDID_GET_TX_QUEUE 0x1A2
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPDMAI_CMD_OPEN(cmd, dpdmai_id) \
++ MC_CMD_OP(cmd, 0, 0, 32, int, dpdmai_id)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPDMAI_CMD_CREATE(cmd, cfg) \
++do { \
++ MC_CMD_OP(cmd, 0, 8, 8, uint8_t, cfg->priorities[0]);\
++ MC_CMD_OP(cmd, 0, 16, 8, uint8_t, cfg->priorities[1]);\
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPDMAI_RSP_IS_ENABLED(cmd, en) \
++ MC_RSP_OP(cmd, 0, 0, 1, int, en)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPDMAI_CMD_SET_IRQ(cmd, irq_index, irq_cfg) \
++do { \
++ MC_CMD_OP(cmd, 0, 0, 8, uint8_t, irq_index);\
++ MC_CMD_OP(cmd, 0, 32, 32, uint32_t, irq_cfg->val);\
++ MC_CMD_OP(cmd, 1, 0, 64, uint64_t, irq_cfg->addr);\
++ MC_CMD_OP(cmd, 2, 0, 32, int, irq_cfg->irq_num); \
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPDMAI_CMD_GET_IRQ(cmd, irq_index) \
++ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPDMAI_RSP_GET_IRQ(cmd, type, irq_cfg) \
++do { \
++ MC_RSP_OP(cmd, 0, 0, 32, uint32_t, irq_cfg->val); \
++ MC_RSP_OP(cmd, 1, 0, 64, uint64_t, irq_cfg->addr);\
++ MC_RSP_OP(cmd, 2, 0, 32, int, irq_cfg->irq_num); \
++ MC_RSP_OP(cmd, 2, 32, 32, int, type); \
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPDMAI_CMD_SET_IRQ_ENABLE(cmd, irq_index, enable_state) \
++do { \
++ MC_CMD_OP(cmd, 0, 0, 8, uint8_t, enable_state); \
++ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index); \
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPDMAI_CMD_GET_IRQ_ENABLE(cmd, irq_index) \
++ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPDMAI_RSP_GET_IRQ_ENABLE(cmd, enable_state) \
++ MC_RSP_OP(cmd, 0, 0, 8, uint8_t, enable_state)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPDMAI_CMD_SET_IRQ_MASK(cmd, irq_index, mask) \
++do { \
++ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, mask); \
++ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index); \
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPDMAI_CMD_GET_IRQ_MASK(cmd, irq_index) \
++ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPDMAI_RSP_GET_IRQ_MASK(cmd, mask) \
++ MC_RSP_OP(cmd, 0, 0, 32, uint32_t, mask)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPDMAI_CMD_GET_IRQ_STATUS(cmd, irq_index, status) \
++do { \
++ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, status);\
++ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPDMAI_RSP_GET_IRQ_STATUS(cmd, status) \
++ MC_RSP_OP(cmd, 0, 0, 32, uint32_t, status)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPDMAI_CMD_CLEAR_IRQ_STATUS(cmd, irq_index, status) \
++do { \
++ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, status); \
++ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index); \
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPDMAI_RSP_GET_ATTR(cmd, attr) \
++do { \
++ MC_RSP_OP(cmd, 0, 0, 32, int, attr->id); \
++ MC_RSP_OP(cmd, 0, 32, 8, uint8_t, attr->num_of_priorities); \
++ MC_RSP_OP(cmd, 1, 0, 16, uint16_t, attr->version.major);\
++ MC_RSP_OP(cmd, 1, 16, 16, uint16_t, attr->version.minor);\
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPDMAI_CMD_SET_RX_QUEUE(cmd, priority, cfg) \
++do { \
++ MC_CMD_OP(cmd, 0, 0, 32, int, cfg->dest_cfg.dest_id); \
++ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, cfg->dest_cfg.priority); \
++ MC_CMD_OP(cmd, 0, 40, 8, uint8_t, priority); \
++ MC_CMD_OP(cmd, 0, 48, 4, enum dpdmai_dest, cfg->dest_cfg.dest_type); \
++ MC_CMD_OP(cmd, 1, 0, 64, uint64_t, cfg->user_ctx); \
++ MC_CMD_OP(cmd, 2, 0, 32, uint32_t, cfg->options);\
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPDMAI_CMD_GET_RX_QUEUE(cmd, priority) \
++ MC_CMD_OP(cmd, 0, 40, 8, uint8_t, priority)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPDMAI_RSP_GET_RX_QUEUE(cmd, attr) \
++do { \
++ MC_RSP_OP(cmd, 0, 0, 32, int, attr->dest_cfg.dest_id);\
++ MC_RSP_OP(cmd, 0, 32, 8, uint8_t, attr->dest_cfg.priority);\
++ MC_RSP_OP(cmd, 0, 48, 4, enum dpdmai_dest, attr->dest_cfg.dest_type);\
++ MC_RSP_OP(cmd, 1, 0, 64, uint64_t, attr->user_ctx);\
++ MC_RSP_OP(cmd, 2, 0, 32, uint32_t, attr->fqid);\
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPDMAI_CMD_GET_TX_QUEUE(cmd, priority) \
++ MC_CMD_OP(cmd, 0, 40, 8, uint8_t, priority)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPDMAI_RSP_GET_TX_QUEUE(cmd, attr) \
++ MC_RSP_OP(cmd, 1, 0, 32, uint32_t, attr->fqid)
++
++#endif /* _FSL_DPDMAI_CMD_H */
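
On the command side, DPDMAI_CMD_SET_RX_QUEUE packs the destination ID into bits 0-31 of parameter word 0, the destination priority into bits 32-39, the queue selector into bits 40-47 and the destination type into bits 48-51, with user_ctx in word 1 and the option flags in word 2. A presumed wrapper, under the same fsl_mc_cmd.h assumptions as before, would look like this:

int dpdmai_set_rx_queue(struct fsl_mc_io *mc_io,
                        uint32_t cmd_flags,
                        uint16_t token,
                        uint8_t priority,
                        const struct dpdmai_rx_queue_cfg *cfg)
{
    struct mc_command cmd = { 0 };

    /* Prepare the command: header first, then the packed parameter words */
    cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_SET_RX_QUEUE,
                                      cmd_flags, token);
    DPDMAI_CMD_SET_RX_QUEUE(cmd, priority, cfg);

    /* Send the command to the MC firmware and return its completion status */
    return mc_send_command(mc_io, &cmd);
}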
+diff --git a/drivers/net/dpaa2/mc/fsl_dpdmux.h b/drivers/net/dpaa2/mc/fsl_dpdmux.h
+new file mode 100644
+index 0000000..455a042
+--- /dev/null
++++ b/drivers/net/dpaa2/mc/fsl_dpdmux.h
+@@ -0,0 +1,724 @@
++/* Copyright 2013-2015 Freescale Semiconductor Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of the above-listed copyright holders nor the
++ * names of any contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
++ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
++ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
++ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
++ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
++ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
++ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
++ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
++ * POSSIBILITY OF SUCH DAMAGE.
++ */
++#ifndef __FSL_DPDMUX_H
++#define __FSL_DPDMUX_H
++
++#include <fsl_net.h>
++
++struct fsl_mc_io;
++
++/* Data Path Demux API
++ * Contains API for handling DPDMUX topology and functionality
++ */
++
++/**
++ * dpdmux_open() - Open a control session for the specified object
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @dpdmux_id: DPDMUX unique ID
++ * @token: Returned token; use in subsequent API calls
++ *
++ * This function can be used to open a control session for an
++ * already created object; an object may have been declared in
++ * the DPL or by calling the dpdmux_create() function.
++ * This function returns a unique authentication token,
++ * associated with the specific object ID and the specific MC
++ * portal; this token must be used in all subsequent commands for
++ * this specific object.
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpdmux_open(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ int dpdmux_id,
++ uint16_t *token);
++
++/**
++ * dpdmux_close() - Close the control session of the object
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPDMUX object
++ *
++ * After this function is called, no further operations are
++ * allowed on the object without opening a new control session.
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpdmux_close(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token);
++
++/**
++ * DPDMUX general options
++ */
++
++/**
++ * Enable bridging between internal interfaces
++ */
++#define DPDMUX_OPT_BRIDGE_EN 0x0000000000000002ULL
++
++#define DPDMUX_IRQ_INDEX_IF 0x0000
++#define DPDMUX_IRQ_INDEX 0x0001
++
++/**
++ * IRQ event - Indicates that the link state changed
++ */
++#define DPDMUX_IRQ_EVENT_LINK_CHANGED 0x0001
++
++/**
++ * enum dpdmux_manip - DPDMUX manipulation operations
++ * @DPDMUX_MANIP_NONE: No manipulation on frames
++ * @DPDMUX_MANIP_ADD_REMOVE_S_VLAN: Add S-VLAN on egress, remove it on ingress
++ */
++enum dpdmux_manip {
++ DPDMUX_MANIP_NONE = 0x0,
++ DPDMUX_MANIP_ADD_REMOVE_S_VLAN = 0x1
++};
++
++/**
++ * enum dpdmux_method - DPDMUX method options
++ * @DPDMUX_METHOD_NONE: no DPDMUX method
++ * @DPDMUX_METHOD_C_VLAN_MAC: DPDMUX based on C-VLAN and MAC address
++ * @DPDMUX_METHOD_MAC: DPDMUX based on MAC address
++ * @DPDMUX_METHOD_C_VLAN: DPDMUX based on C-VLAN
++ * @DPDMUX_METHOD_S_VLAN: DPDMUX based on S-VLAN
++ */
++enum dpdmux_method {
++ DPDMUX_METHOD_NONE = 0x0,
++ DPDMUX_METHOD_C_VLAN_MAC = 0x1,
++ DPDMUX_METHOD_MAC = 0x2,
++ DPDMUX_METHOD_C_VLAN = 0x3,
++ DPDMUX_METHOD_S_VLAN = 0x4
++};
++
++/**
++ * struct dpdmux_cfg - DPDMUX configuration parameters
++ * @method: Defines the operation method for the DPDMUX address table
++ * @manip: Required manipulation operation
++ * @num_ifs: Number of interfaces (excluding the uplink interface)
++ * @adv: Advanced parameters; default is all zeros;
++ * use this structure to change default settings
++ */
++struct dpdmux_cfg {
++ enum dpdmux_method method;
++ enum dpdmux_manip manip;
++ uint16_t num_ifs;
++ /**
++ * struct adv - Advanced parameters
++ * @options: DPDMUX options - combination of 'DPDMUX_OPT_<X>' flags
++ * @max_dmat_entries: Maximum entries in DPDMUX address table
++ * 0 - indicates default: 64 entries per interface.
++ * @max_mc_groups: Number of multicast groups in DPDMUX table
++ * 0 - indicates default: 32 multicast groups
++ * @max_vlan_ids: Maximum VLAN IDs allowed in the system -
++ *		 relevant only in case of working in mac+vlan method.
++ *		 0 - indicates default: 16 VLAN IDs.
++ */
++ struct {
++ uint64_t options;
++ uint16_t max_dmat_entries;
++ uint16_t max_mc_groups;
++ uint16_t max_vlan_ids;
++ } adv;
++};
++
++/**
++ * dpdmux_create() - Create the DPDMUX object
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @cfg: Configuration structure
++ * @token: Returned token; use in subsequent API calls
++ *
++ * Create the DPDMUX object, allocate required resources and
++ * perform required initialization.
++ *
++ * The object can be created either by declaring it in the
++ * DPL file, or by calling this function.
++ *
++ * This function returns a unique authentication token,
++ * associated with the specific object ID and the specific MC
++ * portal; this token must be used in all subsequent calls to
++ * this specific object. For objects that are created using the
++ * DPL file, call dpdmux_open() function to get an authentication
++ * token first.
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpdmux_create(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ const struct dpdmux_cfg *cfg,
++ uint16_t *token);
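
Building on the dpdmux_cfg structure above, a minimal creation sketch (editorial, not part of the patch; the field values are arbitrary examples):

/* Sketch: create a MAC-based demux with 4 downlink interfaces, using
 * default advanced parameters (all zeros), then return its token. */
static int dpdmux_create_example(struct fsl_mc_io *mc_io, uint16_t *token)
{
	struct dpdmux_cfg cfg = {
		.method  = DPDMUX_METHOD_MAC,
		.manip   = DPDMUX_MANIP_NONE,
		.num_ifs = 4,
		/* .adv left zeroed: default options and table sizes */
	};

	return dpdmux_create(mc_io, 0, &cfg, token);
}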
++
++/**
++ * dpdmux_destroy() - Destroy the DPDMUX object and release all its resources.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPDMUX object
++ *
++ * Return: '0' on Success; error code otherwise.
++ */
++int dpdmux_destroy(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token);
++
++/**
++ * dpdmux_enable() - Enable DPDMUX functionality
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPDMUX object
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpdmux_enable(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token);
++
++/**
++ * dpdmux_disable() - Disable DPDMUX functionality
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPDMUX object
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpdmux_disable(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token);
++
++/**
++ * dpdmux_is_enabled() - Check if the DPDMUX is enabled.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPDMUX object
++ * @en: Returns '1' if object is enabled; '0' otherwise
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpdmux_is_enabled(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ int *en);
++
++/**
++ * dpdmux_reset() - Reset the DPDMUX, returns the object to initial state.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPDMUX object
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpdmux_reset(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token);
++
++/**
++ * struct dpdmux_irq_cfg - IRQ configuration
++ * @addr: Address that must be written to signal a message-based interrupt
++ * @val: Value to write into irq_addr address
++ * @irq_num: A user defined number associated with this IRQ
++ */
++struct dpdmux_irq_cfg {
++ uint64_t addr;
++ uint32_t val;
++ int irq_num;
++};
++
++/**
++ * dpdmux_set_irq() - Set IRQ information for the DPDMUX to trigger an interrupt.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPDMUX object
++ * @irq_index: Identifies the interrupt index to configure
++ * @irq_cfg: IRQ configuration
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpdmux_set_irq(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ struct dpdmux_irq_cfg *irq_cfg);
++
++/**
++ * dpdmux_get_irq() - Get IRQ information from the DPDMUX.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPDMUX object
++ * @irq_index: The interrupt index to configure
++ * @type: Interrupt type: 0 represents message interrupt
++ * type (both irq_addr and irq_val are valid)
++ * @irq_cfg: IRQ attributes
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpdmux_get_irq(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ int *type,
++ struct dpdmux_irq_cfg *irq_cfg);
++
++/**
++ * dpdmux_set_irq_enable() - Set overall interrupt state.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPDMUX object
++ * @irq_index: The interrupt index to configure
++ * @en: Interrupt state - enable = 1, disable = 0
++ *
++ * Allows GPP software to control when interrupts are generated.
++ * Each interrupt can have up to 32 causes. The enable/disable controls the
++ * overall interrupt state. If the interrupt is disabled, no cause will
++ * trigger an interrupt.
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpdmux_set_irq_enable(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ uint8_t en);
++
++/**
++ * dpdmux_get_irq_enable() - Get overall interrupt state.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPDMUX object
++ * @irq_index: The interrupt index to configure
++ * @en: Returned interrupt state - enable = 1, disable = 0
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpdmux_get_irq_enable(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ uint8_t *en);
++
++/**
++ * dpdmux_set_irq_mask() - Set interrupt mask.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPDMUX object
++ * @irq_index: The interrupt index to configure
++ * @mask: event mask to trigger interrupt;
++ * each bit:
++ * 0 = ignore event
++ * 1 = consider event for asserting IRQ
++ *
++ * Every interrupt can have up to 32 causes and the interrupt model supports
++ * masking/unmasking each cause independently
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpdmux_set_irq_mask(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ uint32_t mask);
++
++/**
++ * dpdmux_get_irq_mask() - Get interrupt mask.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPDMUX object
++ * @irq_index: The interrupt index to configure
++ * @mask: Returned event mask to trigger interrupt
++ *
++ * Every interrupt can have up to 32 causes and the interrupt model supports
++ * masking/unmasking each cause independently
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpdmux_get_irq_mask(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ uint32_t *mask);
++
++/**
++ * dpdmux_get_irq_status() - Get the current status of any pending interrupts.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPDMUX object
++ * @irq_index: The interrupt index to configure
++ * @status: Returned interrupts status - one bit per cause:
++ * 0 = no interrupt pending
++ * 1 = interrupt pending
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpdmux_get_irq_status(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ uint32_t *status);
++
++/**
++ * dpdmux_clear_irq_status() - Clear a pending interrupt's status
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPDMUX object
++ * @irq_index: The interrupt index to configure
++ * @status: bits to clear (W1C) - one bit per cause:
++ * 0 = don't change
++ * 1 = clear status bit
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpdmux_clear_irq_status(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ uint32_t status);
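
The IRQ calls above are normally used together. A condensed editorial sketch, where 'msi_addr' and 'msi_data' stand in for values supplied by the platform's interrupt controller:

/* Sketch: route DPDMUX_IRQ_INDEX as a message-based interrupt, unmask
 * the link-change cause and enable the interrupt as a whole. */
static int dpdmux_irq_setup_example(struct fsl_mc_io *mc_io, uint16_t token,
				    uint64_t msi_addr, uint32_t msi_data)
{
	struct dpdmux_irq_cfg irq_cfg = {
		.addr = msi_addr,
		.val = msi_data,
		.irq_num = 0,
	};
	int err;

	err = dpdmux_set_irq(mc_io, 0, token, DPDMUX_IRQ_INDEX, &irq_cfg);
	if (err)
		return err;

	err = dpdmux_set_irq_mask(mc_io, 0, token, DPDMUX_IRQ_INDEX,
				  DPDMUX_IRQ_EVENT_LINK_CHANGED);
	if (err)
		return err;

	return dpdmux_set_irq_enable(mc_io, 0, token, DPDMUX_IRQ_INDEX, 1);
}

An interrupt handler would then read dpdmux_get_irq_status() and acknowledge the serviced causes with dpdmux_clear_irq_status(), per the W1C semantics described above.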
++
++/**
++ * struct dpdmux_attr - Structure representing DPDMUX attributes
++ * @id: DPDMUX object ID
++ * @version: DPDMUX version
++ * @options: Configuration options (bitmap)
++ * @method: DPDMUX address table method
++ * @manip: DPDMUX manipulation type
++ * @num_ifs: Number of interfaces (excluding the uplink interface)
++ * @mem_size: DPDMUX frame storage memory size
++ */
++struct dpdmux_attr {
++ int id;
++ /**
++ * struct version - DPDMUX version
++ * @major: DPDMUX major version
++ * @minor: DPDMUX minor version
++ */
++ struct {
++ uint16_t major;
++ uint16_t minor;
++ } version;
++ uint64_t options;
++ enum dpdmux_method method;
++ enum dpdmux_manip manip;
++ uint16_t num_ifs;
++ uint16_t mem_size;
++};
++
++/**
++ * dpdmux_get_attributes() - Retrieve DPDMUX attributes
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPDMUX object
++ * @attr: Returned object's attributes
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpdmux_get_attributes(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ struct dpdmux_attr *attr);
++
++/**
++ * dpdmux_ul_set_max_frame_length() - Set the maximum frame length in DPDMUX
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPDMUX object
++ * @max_frame_length: The required maximum frame length
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpdmux_ul_set_max_frame_length(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint16_t max_frame_length);
++
++/**
++ * enum dpdmux_counter_type - Counter types
++ * @DPDMUX_CNT_ING_FRAME: Counts ingress frames
++ * @DPDMUX_CNT_ING_BYTE: Counts ingress bytes
++ * @DPDMUX_CNT_ING_FLTR_FRAME: Counts filtered ingress frames
++ * @DPDMUX_CNT_ING_FRAME_DISCARD: Counts discarded ingress frames
++ * @DPDMUX_CNT_ING_MCAST_FRAME: Counts ingress multicast frames
++ * @DPDMUX_CNT_ING_MCAST_BYTE: Counts ingress multicast bytes
++ * @DPDMUX_CNT_ING_BCAST_FRAME: Counts ingress broadcast frames
++ * @DPDMUX_CNT_ING_BCAST_BYTES: Counts ingress broadcast bytes
++ * @DPDMUX_CNT_EGR_FRAME: Counts egress frames
++ * @DPDMUX_CNT_EGR_BYTE: Counts egress bytes
++ * @DPDMUX_CNT_EGR_FRAME_DISCARD: Counts discarded egress frames
++ */
++enum dpdmux_counter_type {
++ DPDMUX_CNT_ING_FRAME = 0x0,
++ DPDMUX_CNT_ING_BYTE = 0x1,
++ DPDMUX_CNT_ING_FLTR_FRAME = 0x2,
++ DPDMUX_CNT_ING_FRAME_DISCARD = 0x3,
++ DPDMUX_CNT_ING_MCAST_FRAME = 0x4,
++ DPDMUX_CNT_ING_MCAST_BYTE = 0x5,
++ DPDMUX_CNT_ING_BCAST_FRAME = 0x6,
++ DPDMUX_CNT_ING_BCAST_BYTES = 0x7,
++ DPDMUX_CNT_EGR_FRAME = 0x8,
++ DPDMUX_CNT_EGR_BYTE = 0x9,
++ DPDMUX_CNT_EGR_FRAME_DISCARD = 0xa
++};
++
++/**
++ * enum dpdmux_accepted_frames_type - DPDMUX frame types
++ * @DPDMUX_ADMIT_ALL: The device accepts VLAN tagged, untagged and
++ * priority-tagged frames
++ * @DPDMUX_ADMIT_ONLY_VLAN_TAGGED: The device discards untagged frames or
++ * priority-tagged frames that are received on this
++ * interface
++ * @DPDMUX_ADMIT_ONLY_UNTAGGED: Untagged frames or priority-tagged frames
++ * received on this interface are accepted
++ */
++enum dpdmux_accepted_frames_type {
++ DPDMUX_ADMIT_ALL = 0,
++ DPDMUX_ADMIT_ONLY_VLAN_TAGGED = 1,
++ DPDMUX_ADMIT_ONLY_UNTAGGED = 2
++};
++
++/**
++ * enum dpdmux_action - DPDMUX action for un-accepted frames
++ * @DPDMUX_ACTION_DROP: Drop un-accepted frames
++ * @DPDMUX_ACTION_REDIRECT_TO_CTRL: Redirect un-accepted frames to the
++ * control interface
++ */
++enum dpdmux_action {
++ DPDMUX_ACTION_DROP = 0,
++ DPDMUX_ACTION_REDIRECT_TO_CTRL = 1
++};
++
++/**
++ * struct dpdmux_accepted_frames - Frame types configuration
++ * @type: Defines ingress accepted frames
++ * @unaccept_act: Defines action on frames not accepted
++ */
++struct dpdmux_accepted_frames {
++ enum dpdmux_accepted_frames_type type;
++ enum dpdmux_action unaccept_act;
++};
++
++/**
++ * dpdmux_if_set_accepted_frames() - Set the accepted frame types
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPDMUX object
++ * @if_id: Interface ID (0 for uplink, or 1-num_ifs);
++ * @cfg: Frame types configuration
++ *
++ * If 'DPDMUX_ADMIT_ONLY_VLAN_TAGGED' is set - untagged frames or
++ * priority-tagged frames are discarded.
++ * If 'DPDMUX_ADMIT_ONLY_UNTAGGED' is set - untagged frames or
++ * priority-tagged frames are accepted.
++ * If 'DPDMUX_ADMIT_ALL' is set (default mode) - all VLAN-tagged,
++ * untagged and priority-tagged frames are accepted.
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpdmux_if_set_accepted_frames(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint16_t if_id,
++ const struct dpdmux_accepted_frames *cfg);
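
A short editorial sketch of this call, admitting only VLAN-tagged traffic on a hypothetical downlink interface 1 and redirecting everything else to the control interface:

/* Sketch: restrict interface 1 to VLAN-tagged frames. */
static int dpdmux_accepted_frames_example(struct fsl_mc_io *mc_io,
					  uint16_t token)
{
	struct dpdmux_accepted_frames cfg = {
		.type = DPDMUX_ADMIT_ONLY_VLAN_TAGGED,
		.unaccept_act = DPDMUX_ACTION_REDIRECT_TO_CTRL,
	};

	return dpdmux_if_set_accepted_frames(mc_io, 0, token, 1, &cfg);
}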
++
++/**
++ * struct dpdmux_if_attr - Structure representing frame types configuration
++ * @rate: Configured interface rate (in bits per second)
++ * @enabled: Indicates if interface is enabled
++ * @accept_frame_type: Indicates type of accepted frames for the interface
++ */
++struct dpdmux_if_attr {
++ uint32_t rate;
++ int enabled;
++ enum dpdmux_accepted_frames_type accept_frame_type;
++};
++
++/**
++ * dpdmux_if_get_attributes() - Obtain DPDMUX interface attributes
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPDMUX object
++ * @if_id: Interface ID (0 for uplink, or 1-num_ifs);
++ * @attr: Interface attributes
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpdmux_if_get_attributes(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint16_t if_id,
++ struct dpdmux_if_attr *attr);
++
++/**
++ * struct dpdmux_l2_rule - Structure representing L2 rule
++ * @mac_addr: MAC address
++ * @vlan_id: VLAN ID
++ */
++struct dpdmux_l2_rule {
++ uint8_t mac_addr[6];
++ uint16_t vlan_id;
++};
++
++/**
++ * dpdmux_if_remove_l2_rule() - Remove L2 rule from DPDMUX table
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPDMUX object
++ * @if_id: Destination interface ID
++ * @rule: L2 rule
++ *
++ * Function removes an L2 rule from the DPDMUX table
++ * or removes an interface from an existing multicast address
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpdmux_if_remove_l2_rule(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint16_t if_id,
++ const struct dpdmux_l2_rule *rule);
++
++/**
++ * dpdmux_if_add_l2_rule() - Add L2 rule into DPDMUX table
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPDMUX object
++ * @if_id: Destination interface ID
++ * @rule: L2 rule
++ *
++ * Function adds an L2 rule into the DPDMUX table
++ * or adds an interface to an existing multicast address
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpdmux_if_add_l2_rule(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint16_t if_id,
++ const struct dpdmux_l2_rule *rule);
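
An editorial sketch adding one rule; the MAC address and VLAN ID below are placeholders, and the VLAN ID is only meaningful for the VLAN-aware methods:

/* Sketch: steer frames for one unicast MAC (VLAN 100 where applicable)
 * to downlink interface 2. */
static int dpdmux_l2_rule_example(struct fsl_mc_io *mc_io, uint16_t token)
{
	const struct dpdmux_l2_rule rule = {
		.mac_addr = { 0x00, 0x04, 0x9f, 0x01, 0x02, 0x03 },
		.vlan_id = 100,
	};

	return dpdmux_if_add_l2_rule(mc_io, 0, token, 2, &rule);
}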
++
++/**
++* dpdmux_if_get_counter() - Function obtains a specific counter of an interface
++* @mc_io: Pointer to MC portal's I/O object
++* @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++* @token: Token of DPDMUX object
++* @if_id:	Interface ID
++* @counter_type: Counter type
++* @counter: Returned specific counter information
++*
++* Return: '0' on Success; Error code otherwise.
++*/
++int dpdmux_if_get_counter(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint16_t if_id,
++ enum dpdmux_counter_type counter_type,
++ uint64_t *counter);
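
Sketch of a counter read on the uplink (interface 0), for illustration only:

/* Sketch: fetch the uplink ingress frame count. */
static int dpdmux_counter_example(struct fsl_mc_io *mc_io, uint16_t token,
				  uint64_t *frames)
{
	return dpdmux_if_get_counter(mc_io, 0, token, 0 /* uplink */,
				     DPDMUX_CNT_ING_FRAME, frames);
}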
++
++/**
++* dpdmux_ul_reset_counters() - Function resets the uplink counters
++* @mc_io: Pointer to MC portal's I/O object
++* @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++* @token: Token of DPDMUX object
++*
++* Return: '0' on Success; Error code otherwise.
++*/
++int dpdmux_ul_reset_counters(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token);
++
++/**
++ * Enable auto-negotiation
++ */
++#define DPDMUX_LINK_OPT_AUTONEG 0x0000000000000001ULL
++/**
++ * Enable half-duplex mode
++ */
++#define DPDMUX_LINK_OPT_HALF_DUPLEX 0x0000000000000002ULL
++/**
++ * Enable pause frames
++ */
++#define DPDMUX_LINK_OPT_PAUSE 0x0000000000000004ULL
++/**
++ * Enable asymmetric pause frames
++ */
++#define DPDMUX_LINK_OPT_ASYM_PAUSE 0x0000000000000008ULL
++
++/**
++ * struct dpdmux_link_cfg - Structure representing DPDMUX link configuration
++ * @rate: Rate
++ * @options: Mask of available options; use 'DPDMUX_LINK_OPT_<X>' values
++ */
++struct dpdmux_link_cfg {
++ uint32_t rate;
++ uint64_t options;
++};
++
++/**
++ * dpdmux_if_set_link_cfg() - Set the link configuration.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPDMUX object
++ * @if_id: Interface ID
++ * @cfg: Link configuration
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpdmux_if_set_link_cfg(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint16_t if_id,
++ struct dpdmux_link_cfg *cfg);
++/**
++ * struct dpdmux_link_state - Structure representing DPDMUX link state
++ * @rate: Rate
++ * @options: Mask of available options; use 'DPDMUX_LINK_OPT_<X>' values
++ * @up: 0 - down, 1 - up
++ */
++struct dpdmux_link_state {
++ uint32_t rate;
++ uint64_t options;
++ int up;
++};
++
++/**
++ * dpdmux_if_get_link_state() - Return the link state
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPDMUX object
++ * @if_id: Interface ID
++ * @state: Returned link state
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpdmux_if_get_link_state(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint16_t if_id,
++ struct dpdmux_link_state *state);
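
An editorial sketch combining the two link calls on interface 1; the rate value assumes the bits-per-second convention documented for dpdmux_if_attr above:

/* Sketch: request 1 Gbit/s with auto-negotiation, then poll link state. */
static int dpdmux_link_example(struct fsl_mc_io *mc_io, uint16_t token)
{
	struct dpdmux_link_cfg cfg = {
		.rate = 1000000000,              /* assumed bits per second */
		.options = DPDMUX_LINK_OPT_AUTONEG,
	};
	struct dpdmux_link_state state;
	int err;

	err = dpdmux_if_set_link_cfg(mc_io, 0, token, 1, &cfg);
	if (err)
		return err;

	err = dpdmux_if_get_link_state(mc_io, 0, token, 1, &state);
	if (err)
		return err;

	/* state.up reports 1 once the link has come up */
	return state.up ? 0 : -1;
}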
++
++#endif /* __FSL_DPDMUX_H */
+diff --git a/drivers/net/dpaa2/mc/fsl_dpdmux_cmd.h b/drivers/net/dpaa2/mc/fsl_dpdmux_cmd.h
+new file mode 100644
+index 0000000..0a5cf17
+--- /dev/null
++++ b/drivers/net/dpaa2/mc/fsl_dpdmux_cmd.h
+@@ -0,0 +1,256 @@
++/* Copyright 2013-2015 Freescale Semiconductor Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of the above-listed copyright holders nor the
++ * names of any contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
++ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
++ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
++ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
++ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
++ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
++ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
++ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
++ * POSSIBILITY OF SUCH DAMAGE.
++ */
++#ifndef _FSL_DPDMUX_CMD_H
++#define _FSL_DPDMUX_CMD_H
++
++/* DPDMUX Version */
++#define DPDMUX_VER_MAJOR 5
++#define DPDMUX_VER_MINOR 0
++
++/* Command IDs */
++#define DPDMUX_CMDID_CLOSE 0x800
++#define DPDMUX_CMDID_OPEN 0x806
++#define DPDMUX_CMDID_CREATE 0x906
++#define DPDMUX_CMDID_DESTROY 0x900
++
++#define DPDMUX_CMDID_ENABLE 0x002
++#define DPDMUX_CMDID_DISABLE 0x003
++#define DPDMUX_CMDID_GET_ATTR 0x004
++#define DPDMUX_CMDID_RESET 0x005
++#define DPDMUX_CMDID_IS_ENABLED 0x006
++
++#define DPDMUX_CMDID_SET_IRQ 0x010
++#define DPDMUX_CMDID_GET_IRQ 0x011
++#define DPDMUX_CMDID_SET_IRQ_ENABLE 0x012
++#define DPDMUX_CMDID_GET_IRQ_ENABLE 0x013
++#define DPDMUX_CMDID_SET_IRQ_MASK 0x014
++#define DPDMUX_CMDID_GET_IRQ_MASK 0x015
++#define DPDMUX_CMDID_GET_IRQ_STATUS 0x016
++#define DPDMUX_CMDID_CLEAR_IRQ_STATUS 0x017
++
++#define DPDMUX_CMDID_UL_SET_MAX_FRAME_LENGTH 0x0a1
++
++#define DPDMUX_CMDID_UL_RESET_COUNTERS 0x0a3
++
++#define DPDMUX_CMDID_IF_SET_ACCEPTED_FRAMES 0x0a7
++#define DPDMUX_CMDID_IF_GET_ATTR 0x0a8
++
++#define DPDMUX_CMDID_IF_ADD_L2_RULE 0x0b0
++#define DPDMUX_CMDID_IF_REMOVE_L2_RULE 0x0b1
++#define DPDMUX_CMDID_IF_GET_COUNTER 0x0b2
++#define DPDMUX_CMDID_IF_SET_LINK_CFG 0x0b3
++#define DPDMUX_CMDID_IF_GET_LINK_STATE 0x0b4
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPDMUX_CMD_OPEN(cmd, dpdmux_id) \
++ MC_CMD_OP(cmd, 0, 0, 32, int, dpdmux_id)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPDMUX_CMD_CREATE(cmd, cfg) \
++do { \
++ MC_CMD_OP(cmd, 0, 0, 8, enum dpdmux_method, cfg->method);\
++ MC_CMD_OP(cmd, 0, 8, 8, enum dpdmux_manip, cfg->manip);\
++ MC_CMD_OP(cmd, 0, 16, 16, uint16_t, cfg->num_ifs);\
++ MC_CMD_OP(cmd, 1, 0, 16, uint16_t, cfg->adv.max_dmat_entries);\
++ MC_CMD_OP(cmd, 1, 16, 16, uint16_t, cfg->adv.max_mc_groups);\
++ MC_CMD_OP(cmd, 1, 32, 16, uint16_t, cfg->adv.max_vlan_ids);\
++ MC_CMD_OP(cmd, 2, 0, 64, uint64_t, cfg->adv.options);\
++} while (0)
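
For orientation: these MC_CMD_OP/MC_RSP_OP macros pack and unpack 64-bit command parameters of a 'struct mc_command' at the given parameter index, bit offset and width. The corresponding dpdmux.c is expected to drive them roughly as in the sketch below; this is an assumption based on the common MC flib pattern, with mc_encode_cmd_header(), mc_send_command() and MC_CMD_HDR_READ_TOKEN() taken from fsl_mc_cmd.h:

/* Sketch of the caller side of DPDMUX_CMD_CREATE (assumed flib pattern). */
int dpdmux_create(struct fsl_mc_io *mc_io,
		  uint32_t cmd_flags,
		  const struct dpdmux_cfg *cfg,
		  uint16_t *token)
{
	struct mc_command cmd = { 0 };
	int err;

	/* Build the command header, then pack the payload via the macro */
	cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_CREATE, cmd_flags, 0);
	DPDMUX_CMD_CREATE(cmd, cfg);

	/* Send over the MC portal and pull the new object's token back */
	err = mc_send_command(mc_io, &cmd);
	if (err)
		return err;

	*token = MC_CMD_HDR_READ_TOKEN(cmd.header);
	return 0;
}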
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPDMUX_RSP_IS_ENABLED(cmd, en) \
++ MC_RSP_OP(cmd, 0, 0, 1, int, en)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPDMUX_CMD_SET_IRQ(cmd, irq_index, irq_cfg) \
++do { \
++ MC_CMD_OP(cmd, 0, 0, 8, uint8_t, irq_index);\
++ MC_CMD_OP(cmd, 0, 32, 32, uint32_t, irq_cfg->val);\
++ MC_CMD_OP(cmd, 1, 0, 64, uint64_t, irq_cfg->addr);\
++ MC_CMD_OP(cmd, 2, 0, 32, int, irq_cfg->irq_num); \
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPDMUX_CMD_GET_IRQ(cmd, irq_index) \
++ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPDMUX_RSP_GET_IRQ(cmd, type, irq_cfg) \
++do { \
++ MC_RSP_OP(cmd, 0, 0, 32, uint32_t, irq_cfg->val); \
++ MC_RSP_OP(cmd, 1, 0, 64, uint64_t, irq_cfg->addr); \
++ MC_RSP_OP(cmd, 2, 0, 32, int, irq_cfg->irq_num); \
++ MC_RSP_OP(cmd, 2, 32, 32, int, type); \
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPDMUX_CMD_SET_IRQ_ENABLE(cmd, irq_index, en) \
++do { \
++ MC_CMD_OP(cmd, 0, 0, 8, uint8_t, en);\
++ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPDMUX_CMD_GET_IRQ_ENABLE(cmd, irq_index) \
++ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPDMUX_RSP_GET_IRQ_ENABLE(cmd, en) \
++ MC_RSP_OP(cmd, 0, 0, 8, uint8_t, en)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPDMUX_CMD_SET_IRQ_MASK(cmd, irq_index, mask) \
++do { \
++ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, mask); \
++ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index); \
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPDMUX_CMD_GET_IRQ_MASK(cmd, irq_index) \
++ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPDMUX_RSP_GET_IRQ_MASK(cmd, mask) \
++ MC_RSP_OP(cmd, 0, 0, 32, uint32_t, mask)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPDMUX_CMD_GET_IRQ_STATUS(cmd, irq_index, status) \
++do { \
++ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, status);\
++ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPDMUX_RSP_GET_IRQ_STATUS(cmd, status) \
++	MC_RSP_OP(cmd, 0, 0, 32, uint32_t, status)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPDMUX_CMD_CLEAR_IRQ_STATUS(cmd, irq_index, status) \
++do { \
++ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, status); \
++ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index); \
++} while (0)
++
++#define DPDMUX_RSP_GET_ATTR(cmd, attr) \
++do { \
++ MC_RSP_OP(cmd, 0, 0, 8, enum dpdmux_method, attr->method);\
++ MC_RSP_OP(cmd, 0, 8, 8, enum dpdmux_manip, attr->manip);\
++ MC_RSP_OP(cmd, 0, 16, 16, uint16_t, attr->num_ifs);\
++ MC_RSP_OP(cmd, 0, 32, 16, uint16_t, attr->mem_size);\
++ MC_RSP_OP(cmd, 2, 0, 32, int, attr->id);\
++ MC_RSP_OP(cmd, 3, 0, 64, uint64_t, attr->options);\
++ MC_RSP_OP(cmd, 4, 0, 16, uint16_t, attr->version.major);\
++ MC_RSP_OP(cmd, 4, 16, 16, uint16_t, attr->version.minor);\
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPDMUX_CMD_UL_SET_MAX_FRAME_LENGTH(cmd, max_frame_length) \
++ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, max_frame_length)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPDMUX_CMD_IF_SET_ACCEPTED_FRAMES(cmd, if_id, cfg) \
++do { \
++ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, if_id);\
++ MC_CMD_OP(cmd, 0, 16, 4, enum dpdmux_accepted_frames_type, cfg->type);\
++	MC_CMD_OP(cmd, 0, 20, 4, enum dpdmux_action, \
++ cfg->unaccept_act);\
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPDMUX_CMD_IF_GET_ATTR(cmd, if_id) \
++ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, if_id)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPDMUX_RSP_IF_GET_ATTR(cmd, attr) \
++do { \
++ MC_RSP_OP(cmd, 0, 56, 4, enum dpdmux_accepted_frames_type, \
++ attr->accept_frame_type);\
++ MC_RSP_OP(cmd, 0, 24, 1, int, attr->enabled);\
++ MC_RSP_OP(cmd, 1, 0, 32, uint32_t, attr->rate);\
++} while (0)
++
++#define DPDMUX_CMD_IF_REMOVE_L2_RULE(cmd, if_id, l2_rule) \
++do { \
++ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, if_id);\
++ MC_CMD_OP(cmd, 0, 16, 8, uint8_t, l2_rule->mac_addr[5]);\
++ MC_CMD_OP(cmd, 0, 24, 8, uint8_t, l2_rule->mac_addr[4]);\
++ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, l2_rule->mac_addr[3]);\
++ MC_CMD_OP(cmd, 0, 40, 8, uint8_t, l2_rule->mac_addr[2]);\
++ MC_CMD_OP(cmd, 0, 48, 8, uint8_t, l2_rule->mac_addr[1]);\
++ MC_CMD_OP(cmd, 0, 56, 8, uint8_t, l2_rule->mac_addr[0]);\
++ MC_CMD_OP(cmd, 1, 32, 16, uint16_t, l2_rule->vlan_id);\
++} while (0)
++
++#define DPDMUX_CMD_IF_ADD_L2_RULE(cmd, if_id, l2_rule) \
++do { \
++ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, if_id);\
++ MC_CMD_OP(cmd, 0, 16, 8, uint8_t, l2_rule->mac_addr[5]);\
++ MC_CMD_OP(cmd, 0, 24, 8, uint8_t, l2_rule->mac_addr[4]);\
++ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, l2_rule->mac_addr[3]);\
++ MC_CMD_OP(cmd, 0, 40, 8, uint8_t, l2_rule->mac_addr[2]);\
++ MC_CMD_OP(cmd, 0, 48, 8, uint8_t, l2_rule->mac_addr[1]);\
++ MC_CMD_OP(cmd, 0, 56, 8, uint8_t, l2_rule->mac_addr[0]);\
++ MC_CMD_OP(cmd, 1, 32, 16, uint16_t, l2_rule->vlan_id);\
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPDMUX_CMD_IF_GET_COUNTER(cmd, if_id, counter_type) \
++do { \
++ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, if_id);\
++ MC_CMD_OP(cmd, 0, 16, 8, enum dpdmux_counter_type, counter_type);\
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPDMUX_RSP_IF_GET_COUNTER(cmd, counter) \
++ MC_RSP_OP(cmd, 1, 0, 64, uint64_t, counter)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPDMUX_CMD_IF_SET_LINK_CFG(cmd, if_id, cfg) \
++do { \
++ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, if_id);\
++ MC_CMD_OP(cmd, 1, 0, 32, uint32_t, cfg->rate);\
++ MC_CMD_OP(cmd, 2, 0, 64, uint64_t, cfg->options);\
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPDMUX_CMD_IF_GET_LINK_STATE(cmd, if_id) \
++ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, if_id)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPDMUX_RSP_IF_GET_LINK_STATE(cmd, state) \
++do { \
++ MC_RSP_OP(cmd, 0, 32, 1, int, state->up);\
++ MC_RSP_OP(cmd, 1, 0, 32, uint32_t, state->rate);\
++ MC_RSP_OP(cmd, 2, 0, 64, uint64_t, state->options);\
++} while (0)
++
++#endif /* _FSL_DPDMUX_CMD_H */
+diff --git a/drivers/net/dpaa2/mc/fsl_dpio.h b/drivers/net/dpaa2/mc/fsl_dpio.h
+new file mode 100644
+index 0000000..88a492f
+--- /dev/null
++++ b/drivers/net/dpaa2/mc/fsl_dpio.h
+@@ -0,0 +1,460 @@
++/* Copyright 2013-2015 Freescale Semiconductor Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of the above-listed copyright holders nor the
++ * names of any contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
++ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
++ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
++ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
++ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
++ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
++ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
++ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
++ * POSSIBILITY OF SUCH DAMAGE.
++ */
++#ifndef __FSL_DPIO_H
++#define __FSL_DPIO_H
++
++/* Data Path I/O Portal API
++ * Contains initialization APIs and runtime control APIs for DPIO
++ */
++
++struct fsl_mc_io;
++
++/**
++ * dpio_open() - Open a control session for the specified object
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @dpio_id: DPIO unique ID
++ * @token: Returned token; use in subsequent API calls
++ *
++ * This function can be used to open a control session for an
++ * already created object; an object may have been declared in
++ * the DPL or by calling the dpio_create() function.
++ * This function returns a unique authentication token,
++ * associated with the specific object ID and the specific MC
++ * portal; this token must be used in all subsequent commands for
++ * this specific object.
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpio_open(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ int dpio_id,
++ uint16_t *token);
++
++/**
++ * dpio_close() - Close the control session of the object
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPIO object
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpio_close(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token);
++
++/**
++ * enum dpio_channel_mode - DPIO notification channel mode
++ * @DPIO_NO_CHANNEL: No support for notification channel
++ * @DPIO_LOCAL_CHANNEL: Notifications on data availability can be received by a
++ * dedicated channel in the DPIO; the user should point the queue's
++ * destination in the relevant interface to this DPIO
++ */
++enum dpio_channel_mode {
++ DPIO_NO_CHANNEL = 0,
++ DPIO_LOCAL_CHANNEL = 1,
++};
++
++/**
++ * struct dpio_cfg - Structure representing DPIO configuration
++ * @channel_mode: Notification channel mode
++ * @num_priorities: Number of priorities for the notification channel (1-8);
++ * relevant only if 'channel_mode = DPIO_LOCAL_CHANNEL'
++ */
++struct dpio_cfg {
++ enum dpio_channel_mode channel_mode;
++ uint8_t num_priorities;
++};
++
++/**
++ * dpio_create() - Create the DPIO object.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @cfg: Configuration structure
++ * @token: Returned token; use in subsequent API calls
++ *
++ * Create the DPIO object, allocate required resources and
++ * perform required initialization.
++ *
++ * The object can be created either by declaring it in the
++ * DPL file, or by calling this function.
++ *
++ * This function returns a unique authentication token,
++ * associated with the specific object ID and the specific MC
++ * portal; this token must be used in all subsequent calls to
++ * this specific object. For objects that are created using the
++ * DPL file, call dpio_open() function to get an authentication
++ * token first.
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpio_create(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ const struct dpio_cfg *cfg,
++ uint16_t *token);
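
A short editorial sketch creating a DPIO with a local notification channel and then enabling it; the priority count is an arbitrary example within the documented 1-8 range:

/* Sketch: create and enable a DPIO with a local notification channel. */
static int dpio_create_example(struct fsl_mc_io *mc_io, uint16_t *token)
{
	struct dpio_cfg cfg = {
		.channel_mode = DPIO_LOCAL_CHANNEL,
		.num_priorities = 8,
	};
	int err;

	err = dpio_create(mc_io, 0, &cfg, token);
	if (err)
		return err;

	return dpio_enable(mc_io, 0, *token);
}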
++
++/**
++ * dpio_destroy() - Destroy the DPIO object and release all its resources.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPIO object
++ *
++ * Return: '0' on Success; Error code otherwise
++ */
++int dpio_destroy(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token);
++
++/**
++ * dpio_enable() - Enable the DPIO, allow I/O portal operations.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPIO object
++ *
++ * Return: '0' on Success; Error code otherwise
++ */
++int dpio_enable(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token);
++
++/**
++ * dpio_disable() - Disable the DPIO, stop any I/O portal operation.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPIO object
++ *
++ * Return: '0' on Success; Error code otherwise
++ */
++int dpio_disable(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token);
++
++/**
++ * dpio_is_enabled() - Check if the DPIO is enabled.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPIO object
++ * @en: Returns '1' if object is enabled; '0' otherwise
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpio_is_enabled(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ int *en);
++
++/**
++ * dpio_reset() - Reset the DPIO, returns the object to initial state.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPIO object
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpio_reset(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token);
++
++/**
++ * dpio_set_stashing_destination() - Set the stashing destination.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPIO object
++ * @sdest: stashing destination value
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpio_set_stashing_destination(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t sdest);
++
++/**
++ * dpio_get_stashing_destination() - Get the stashing destination.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPIO object
++ * @sdest: Returns the stashing destination value
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpio_get_stashing_destination(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t *sdest);
++
++/**
++ * dpio_add_static_dequeue_channel() - Add a static dequeue channel.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPIO object
++ * @dpcon_id: DPCON object ID
++ * @channel_index: Returned channel index to be used in qbman API
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpio_add_static_dequeue_channel(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ int dpcon_id,
++ uint8_t *channel_index);
++
++/**
++ * dpio_remove_static_dequeue_channel() - Remove a static dequeue channel.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPIO object
++ * @dpcon_id: DPCON object ID
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpio_remove_static_dequeue_channel(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ int dpcon_id);
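
An editorial sketch of the channel binding flow; the stashing destination value is a placeholder, and the hand-off of the returned channel index to the QBMAN dequeue setup is only indicated in a comment:

/* Sketch: bind a DPCON to this DPIO as a static dequeue channel. */
static int dpio_channel_example(struct fsl_mc_io *mc_io, uint16_t token,
				int dpcon_id)
{
	uint8_t channel_index;
	int err;

	/* '0' is only a placeholder for a real stashing destination */
	err = dpio_set_stashing_destination(mc_io, 0, token, 0);
	if (err)
		return err;

	err = dpio_add_static_dequeue_channel(mc_io, 0, token,
					      dpcon_id, &channel_index);
	if (err)
		return err;

	/* ... channel_index is handed to the QBMAN dequeue setup here ... */

	return dpio_remove_static_dequeue_channel(mc_io, 0, token, dpcon_id);
}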
++
++/**
++ * DPIO IRQ Index and Events
++ */
++
++/**
++ * Irq software-portal index
++ */
++#define DPIO_IRQ_SWP_INDEX 0
++
++/**
++ * struct dpio_irq_cfg - IRQ configuration
++ * @addr: Address that must be written to signal a message-based interrupt
++ * @val: Value to write into irq_addr address
++ * @irq_num: A user defined number associated with this IRQ
++ */
++struct dpio_irq_cfg {
++ uint64_t addr;
++ uint32_t val;
++ int irq_num;
++};
++
++/**
++ * dpio_set_irq() - Set IRQ information for the DPIO to trigger an interrupt.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPIO object
++ * @irq_index: Identifies the interrupt index to configure
++ * @irq_cfg: IRQ configuration
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpio_set_irq(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ struct dpio_irq_cfg *irq_cfg);
++
++/**
++ * dpio_get_irq() - Get IRQ information from the DPIO.
++ *
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPIO object
++ * @irq_index: The interrupt index to configure
++ * @type: Interrupt type: 0 represents message interrupt
++ * type (both irq_addr and irq_val are valid)
++ * @irq_cfg: IRQ attributes
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpio_get_irq(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ int *type,
++ struct dpio_irq_cfg *irq_cfg);
++
++/**
++ * dpio_set_irq_enable() - Set overall interrupt state.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPIO object
++ * @irq_index: The interrupt index to configure
++ * @en: Interrupt state - enable = 1, disable = 0
++ *
++ * Allows GPP software to control when interrupts are generated.
++ * Each interrupt can have up to 32 causes. The enable/disable controls the
++ * overall interrupt state. If the interrupt is disabled, no cause will
++ * trigger an interrupt.
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpio_set_irq_enable(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ uint8_t en);
++
++/**
++ * dpio_get_irq_enable() - Get overall interrupt state
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPIO object
++ * @irq_index: The interrupt index to configure
++ * @en: Returned interrupt state - enable = 1, disable = 0
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpio_get_irq_enable(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ uint8_t *en);
++
++/**
++ * dpio_set_irq_mask() - Set interrupt mask.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPIO object
++ * @irq_index: The interrupt index to configure
++ * @mask: event mask to trigger interrupt;
++ * each bit:
++ * 0 = ignore event
++ * 1 = consider event for asserting IRQ
++ *
++ * Every interrupt can have up to 32 causes and the interrupt model supports
++ * masking/unmasking each cause independently
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpio_set_irq_mask(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ uint32_t mask);
++
++/**
++ * dpio_get_irq_mask() - Get interrupt mask.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPIO object
++ * @irq_index: The interrupt index to configure
++ * @mask: Returned event mask to trigger interrupt
++ *
++ * Every interrupt can have up to 32 causes and the interrupt model supports
++ * masking/unmasking each cause independently
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpio_get_irq_mask(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ uint32_t *mask);
++
++/**
++ * dpio_get_irq_status() - Get the current status of any pending interrupts.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPIO object
++ * @irq_index: The interrupt index to configure
++ * @status: Returned interrupts status - one bit per cause:
++ * 0 = no interrupt pending
++ * 1 = interrupt pending
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpio_get_irq_status(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ uint32_t *status);
++
++/**
++ * dpio_clear_irq_status() - Clear a pending interrupt's status
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPIO object
++ * @irq_index: The interrupt index to configure
++ * @status: bits to clear (W1C) - one bit per cause:
++ * 0 = don't change
++ * 1 = clear status bit
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpio_clear_irq_status(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ uint32_t status);
++
++/**
++ * struct dpio_attr - Structure representing DPIO attributes
++ * @id: DPIO object ID
++ * @version: DPIO version
++ * @qbman_portal_ce_offset: offset of the software portal cache-enabled area
++ * @qbman_portal_ci_offset: offset of the software portal cache-inhibited area
++ * @qbman_portal_id: Software portal ID
++ * @channel_mode: Notification channel mode
++ * @num_priorities: Number of priorities for the notification channel (1-8);
++ * relevant only if 'channel_mode = DPIO_LOCAL_CHANNEL'
++ * @qbman_version: QBMAN version
++ */
++struct dpio_attr {
++ int id;
++ /**
++ * struct version - DPIO version
++ * @major: DPIO major version
++ * @minor: DPIO minor version
++ */
++ struct {
++ uint16_t major;
++ uint16_t minor;
++ } version;
++ uint64_t qbman_portal_ce_offset;
++ uint64_t qbman_portal_ci_offset;
++ uint16_t qbman_portal_id;
++ enum dpio_channel_mode channel_mode;
++ uint8_t num_priorities;
++ uint32_t qbman_version;
++};
++
++/**
++ * dpio_get_attributes() - Retrieve DPIO attributes
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPIO object
++ * @attr: Returned object's attributes
++ *
++ * Return: '0' on Success; Error code otherwise
++ */
++int dpio_get_attributes(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ struct dpio_attr *attr);
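
A minimal editorial sketch reading the attributes back; a driver would normally use the reported QBMAN portal ID and the cache-enabled/cache-inhibited offsets to map the software portal before doing any enqueue/dequeue:

/* Sketch: query DPIO attributes after creation. */
static int dpio_attr_example(struct fsl_mc_io *mc_io, uint16_t token)
{
	struct dpio_attr attr = { 0 };
	int err;

	err = dpio_get_attributes(mc_io, 0, token, &attr);
	if (err)
		return err;

	/* attr.qbman_portal_id, attr.qbman_portal_ce_offset and
	 * attr.qbman_portal_ci_offset describe the software portal. */
	return 0;
}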
++#endif /* __FSL_DPIO_H */
+diff --git a/drivers/net/dpaa2/mc/fsl_dpio_cmd.h b/drivers/net/dpaa2/mc/fsl_dpio_cmd.h
+new file mode 100644
+index 0000000..f339cd6
+--- /dev/null
++++ b/drivers/net/dpaa2/mc/fsl_dpio_cmd.h
+@@ -0,0 +1,184 @@
++/* Copyright 2013-2015 Freescale Semiconductor Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of the above-listed copyright holders nor the
++ * names of any contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
++ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
++ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
++ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
++ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
++ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
++ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
++ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
++ * POSSIBILITY OF SUCH DAMAGE.
++ */
++#ifndef _FSL_DPIO_CMD_H
++#define _FSL_DPIO_CMD_H
++
++/* DPIO Version */
++#define DPIO_VER_MAJOR 3
++#define DPIO_VER_MINOR 2
++
++/* Command IDs */
++#define DPIO_CMDID_CLOSE 0x800
++#define DPIO_CMDID_OPEN 0x803
++#define DPIO_CMDID_CREATE 0x903
++#define DPIO_CMDID_DESTROY 0x900
++
++#define DPIO_CMDID_ENABLE 0x002
++#define DPIO_CMDID_DISABLE 0x003
++#define DPIO_CMDID_GET_ATTR 0x004
++#define DPIO_CMDID_RESET 0x005
++#define DPIO_CMDID_IS_ENABLED 0x006
++
++#define DPIO_CMDID_SET_IRQ 0x010
++#define DPIO_CMDID_GET_IRQ 0x011
++#define DPIO_CMDID_SET_IRQ_ENABLE 0x012
++#define DPIO_CMDID_GET_IRQ_ENABLE 0x013
++#define DPIO_CMDID_SET_IRQ_MASK 0x014
++#define DPIO_CMDID_GET_IRQ_MASK 0x015
++#define DPIO_CMDID_GET_IRQ_STATUS 0x016
++#define DPIO_CMDID_CLEAR_IRQ_STATUS 0x017
++
++#define DPIO_CMDID_SET_STASHING_DEST 0x120
++#define DPIO_CMDID_GET_STASHING_DEST 0x121
++#define DPIO_CMDID_ADD_STATIC_DEQUEUE_CHANNEL 0x122
++#define DPIO_CMDID_REMOVE_STATIC_DEQUEUE_CHANNEL 0x123
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPIO_CMD_OPEN(cmd, dpio_id) \
++ MC_CMD_OP(cmd, 0, 0, 32, int, dpio_id)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPIO_CMD_CREATE(cmd, cfg) \
++do { \
++ MC_CMD_OP(cmd, 0, 16, 2, enum dpio_channel_mode, \
++ cfg->channel_mode);\
++ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, cfg->num_priorities);\
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPIO_RSP_IS_ENABLED(cmd, en) \
++ MC_RSP_OP(cmd, 0, 0, 1, int, en)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPIO_CMD_SET_IRQ(cmd, irq_index, irq_cfg) \
++do { \
++ MC_CMD_OP(cmd, 0, 0, 8, uint8_t, irq_index);\
++ MC_CMD_OP(cmd, 0, 32, 32, uint32_t, irq_cfg->val);\
++ MC_CMD_OP(cmd, 1, 0, 64, uint64_t, irq_cfg->addr);\
++ MC_CMD_OP(cmd, 2, 0, 32, int, irq_cfg->irq_num); \
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPIO_CMD_GET_IRQ(cmd, irq_index) \
++ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPIO_RSP_GET_IRQ(cmd, type, irq_cfg) \
++do { \
++ MC_RSP_OP(cmd, 0, 0, 32, uint32_t, irq_cfg->val); \
++ MC_RSP_OP(cmd, 1, 0, 64, uint64_t, irq_cfg->addr); \
++ MC_RSP_OP(cmd, 2, 0, 32, int, irq_cfg->irq_num); \
++ MC_RSP_OP(cmd, 2, 32, 32, int, type); \
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPIO_CMD_SET_IRQ_ENABLE(cmd, irq_index, en) \
++do { \
++ MC_CMD_OP(cmd, 0, 0, 8, uint8_t, en); \
++ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPIO_CMD_GET_IRQ_ENABLE(cmd, irq_index) \
++ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPIO_RSP_GET_IRQ_ENABLE(cmd, en) \
++ MC_RSP_OP(cmd, 0, 0, 8, uint8_t, en)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPIO_CMD_SET_IRQ_MASK(cmd, irq_index, mask) \
++do { \
++ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, mask); \
++ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPIO_CMD_GET_IRQ_MASK(cmd, irq_index) \
++ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPIO_RSP_GET_IRQ_MASK(cmd, mask) \
++ MC_RSP_OP(cmd, 0, 0, 32, uint32_t, mask)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPIO_CMD_GET_IRQ_STATUS(cmd, irq_index, status) \
++do { \
++ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, status);\
++ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPIO_RSP_GET_IRQ_STATUS(cmd, status) \
++ MC_RSP_OP(cmd, 0, 0, 32, uint32_t, status)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPIO_CMD_CLEAR_IRQ_STATUS(cmd, irq_index, status) \
++do { \
++ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, status); \
++ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPIO_RSP_GET_ATTR(cmd, attr) \
++do { \
++ MC_RSP_OP(cmd, 0, 0, 32, int, attr->id);\
++ MC_RSP_OP(cmd, 0, 32, 16, uint16_t, attr->qbman_portal_id);\
++ MC_RSP_OP(cmd, 0, 48, 8, uint8_t, attr->num_priorities);\
++ MC_RSP_OP(cmd, 0, 56, 4, enum dpio_channel_mode, attr->channel_mode);\
++ MC_RSP_OP(cmd, 1, 0, 64, uint64_t, attr->qbman_portal_ce_offset);\
++ MC_RSP_OP(cmd, 2, 0, 64, uint64_t, attr->qbman_portal_ci_offset);\
++ MC_RSP_OP(cmd, 3, 0, 16, uint16_t, attr->version.major);\
++ MC_RSP_OP(cmd, 3, 16, 16, uint16_t, attr->version.minor);\
++ MC_RSP_OP(cmd, 3, 32, 32, uint32_t, attr->qbman_version);\
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPIO_CMD_SET_STASHING_DEST(cmd, sdest) \
++ MC_CMD_OP(cmd, 0, 0, 8, uint8_t, sdest)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPIO_RSP_GET_STASHING_DEST(cmd, sdest) \
++ MC_RSP_OP(cmd, 0, 0, 8, uint8_t, sdest)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPIO_CMD_ADD_STATIC_DEQUEUE_CHANNEL(cmd, dpcon_id) \
++ MC_CMD_OP(cmd, 0, 0, 32, int, dpcon_id)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPIO_RSP_ADD_STATIC_DEQUEUE_CHANNEL(cmd, channel_index) \
++ MC_RSP_OP(cmd, 0, 0, 8, uint8_t, channel_index)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPIO_CMD_REMOVE_STATIC_DEQUEUE_CHANNEL(cmd, dpcon_id) \
++ MC_CMD_OP(cmd, 0, 0, 32, int, dpcon_id)
++#endif /* _FSL_DPIO_CMD_H */
+diff --git a/drivers/net/dpaa2/mc/fsl_dpkg.h b/drivers/net/dpaa2/mc/fsl_dpkg.h
+new file mode 100644
+index 0000000..b2bceaf
+--- /dev/null
++++ b/drivers/net/dpaa2/mc/fsl_dpkg.h
+@@ -0,0 +1,174 @@
++/* Copyright 2013-2015 Freescale Semiconductor Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of the above-listed copyright holders nor the
++ * names of any contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
++ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
++ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
++ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
++ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
++ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
++ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
++ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
++ * POSSIBILITY OF SUCH DAMAGE.
++ */
++#ifndef __FSL_DPKG_H_
++#define __FSL_DPKG_H_
++
++#include <fsl_net.h>
++
++/* Data Path Key Generator API
++ * Contains initialization APIs and runtime APIs for the Key Generator
++ */
++
++/** Key Generator properties */
++
++/**
++ * Number of masks per key extraction
++ */
++#define DPKG_NUM_OF_MASKS 4
++/**
++ * Number of extractions per key profile
++ */
++#define DPKG_MAX_NUM_OF_EXTRACTS 10
++
++/**
++ * enum dpkg_extract_from_hdr_type - Selecting extraction by header types
++ * @DPKG_FROM_HDR: Extract selected bytes from header, by offset
++ * @DPKG_FROM_FIELD: Extract selected bytes from header, by offset from field
++ * @DPKG_FULL_FIELD: Extract a full field
++ */
++enum dpkg_extract_from_hdr_type {
++ DPKG_FROM_HDR = 0,
++ DPKG_FROM_FIELD = 1,
++ DPKG_FULL_FIELD = 2
++};
++
++/**
++ * enum dpkg_extract_type - Enumeration for selecting extraction type
++ * @DPKG_EXTRACT_FROM_HDR: Extract from the header
++ * @DPKG_EXTRACT_FROM_DATA: Extract from data not in specific header
++ * @DPKG_EXTRACT_FROM_PARSE: Extract from parser-result;
++ * e.g. can be used to extract header existence;
++ * please refer to 'Parse Result definition' section in the parser BG
++ */
++enum dpkg_extract_type {
++ DPKG_EXTRACT_FROM_HDR = 0,
++ DPKG_EXTRACT_FROM_DATA = 1,
++ DPKG_EXTRACT_FROM_PARSE = 3
++};
++
++/**
++ * struct dpkg_mask - A structure for defining a single extraction mask
++ * @mask: Byte mask for the extracted content
++ * @offset: Offset within the extracted content
++ */
++struct dpkg_mask {
++ uint8_t mask;
++ uint8_t offset;
++};
++
++/**
++ * struct dpkg_extract - A structure for defining a single extraction
++ * @type: Determines how the union below is interpreted:
++ * DPKG_EXTRACT_FROM_HDR: selects 'from_hdr';
++ * DPKG_EXTRACT_FROM_DATA: selects 'from_data';
++ * DPKG_EXTRACT_FROM_PARSE: selects 'from_parse'
++ * @extract: Selects extraction method
++ * @num_of_byte_masks: Defines the number of valid entries in the array below;
++ * This is also the number of bytes to be used as masks
++ * @masks: Masks parameters
++ */
++struct dpkg_extract {
++ enum dpkg_extract_type type;
++ /**
++ * union extract - Selects extraction method
++ * @from_hdr - Used when 'type = DPKG_EXTRACT_FROM_HDR'
++ * @from_data - Used when 'type = DPKG_EXTRACT_FROM_DATA'
++ * @from_parse - Used when 'type = DPKG_EXTRACT_FROM_PARSE'
++ */
++ union {
++ /**
++ * struct from_hdr - Used when 'type = DPKG_EXTRACT_FROM_HDR'
++ * @prot: Any of the supported headers
++ * @type: Defines the type of header extraction:
++ * DPKG_FROM_HDR: use size & offset below;
++ * DPKG_FROM_FIELD: use field, size and offset below;
++ * DPKG_FULL_FIELD: use field below
++ * @field: One of the supported fields (NH_FLD_)
++ *
++ * @size: Size in bytes
++ * @offset: Byte offset
++ * @hdr_index: Clear for cases not listed below;
++ * Used for protocols that may have more than a single
++ * header, 0 indicates an outer header;
++ * Supported protocols (possible values):
++ * NET_PROT_VLAN (0, HDR_INDEX_LAST);
++ * NET_PROT_MPLS (0, 1, HDR_INDEX_LAST);
++ * NET_PROT_IP(0, HDR_INDEX_LAST);
++ * NET_PROT_IPv4(0, HDR_INDEX_LAST);
++ * NET_PROT_IPv6(0, HDR_INDEX_LAST);
++ */
++
++ struct {
++ enum net_prot prot;
++ enum dpkg_extract_from_hdr_type type;
++ uint32_t field;
++ uint8_t size;
++ uint8_t offset;
++ uint8_t hdr_index;
++ } from_hdr;
++ /**
++ * struct from_data - Used when 'type = DPKG_EXTRACT_FROM_DATA'
++ * @size: Size in bytes
++ * @offset: Byte offset
++ */
++ struct {
++ uint8_t size;
++ uint8_t offset;
++ } from_data;
++
++ /**
++ * struct from_parse - Used when 'type = DPKG_EXTRACT_FROM_PARSE'
++ * @size: Size in bytes
++ * @offset: Byte offset
++ */
++ struct {
++ uint8_t size;
++ uint8_t offset;
++ } from_parse;
++ } extract;
++
++ uint8_t num_of_byte_masks;
++ struct dpkg_mask masks[DPKG_NUM_OF_MASKS];
++};
++
++/**
++ * struct dpkg_profile_cfg - A structure for defining a full Key Generation
++ * profile (rule)
++ * @num_extracts: Defines the number of valid entries in the array below
++ * @extracts: Array of required extractions
++ */
++struct dpkg_profile_cfg {
++ uint8_t num_extracts;
++ struct dpkg_extract extracts[DPKG_MAX_NUM_OF_EXTRACTS];
++};
++
++#endif /* __FSL_DPKG_H_ */
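
For illustration, a minimal sketch of how a caller might fill the profile structure above with a single full-field extraction keyed on the IP source address; NET_PROT_IP and NH_FLD_IP_SRC are assumed to be provided by the fsl_net.h header included above, and the helper name is hypothetical.

#include <string.h>
#include <fsl_dpkg.h>

/*
 * Illustrative sketch: a one-extraction key-generation profile that keys on
 * the IP source address. NET_PROT_IP and NH_FLD_IP_SRC are assumed to be
 * provided by fsl_net.h, which this header already includes; the helper
 * name is hypothetical.
 */
static void build_ip_src_profile(struct dpkg_profile_cfg *kg_cfg)
{
	memset(kg_cfg, 0, sizeof(*kg_cfg));

	kg_cfg->num_extracts = 1;
	kg_cfg->extracts[0].type = DPKG_EXTRACT_FROM_HDR;
	kg_cfg->extracts[0].extract.from_hdr.prot = NET_PROT_IP;
	kg_cfg->extracts[0].extract.from_hdr.type = DPKG_FULL_FIELD;
	kg_cfg->extracts[0].extract.from_hdr.field = NH_FLD_IP_SRC;
	/* No byte masks: the full extracted field participates in the key. */
	kg_cfg->extracts[0].num_of_byte_masks = 0;
}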
+diff --git a/drivers/net/dpaa2/mc/fsl_dpmac.h b/drivers/net/dpaa2/mc/fsl_dpmac.h
+new file mode 100644
+index 0000000..ad27772
+--- /dev/null
++++ b/drivers/net/dpaa2/mc/fsl_dpmac.h
+@@ -0,0 +1,593 @@
++/* Copyright 2013-2015 Freescale Semiconductor Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of the above-listed copyright holders nor the
++ * names of any contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
++ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
++ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
++ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
++ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
++ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
++ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
++ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
++ * POSSIBILITY OF SUCH DAMAGE.
++ */
++#ifndef __FSL_DPMAC_H
++#define __FSL_DPMAC_H
++
++/* Data Path MAC API
++ * Contains initialization APIs and runtime control APIs for DPMAC
++ */
++
++struct fsl_mc_io;
++
++/**
++ * dpmac_open() - Open a control session for the specified object.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @dpmac_id: DPMAC unique ID
++ * @token: Returned token; use in subsequent API calls
++ *
++ * This function can be used to open a control session for an
++ * already created object; an object may have been declared in
++ * the DPL or by calling the dpmac_create function.
++ * This function returns a unique authentication token,
++ * associated with the specific object ID and the specific MC
++ * portal; this token must be used in all subsequent commands for
++ * this specific object
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpmac_open(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ int dpmac_id,
++ uint16_t *token);
++
++/**
++ * dpmac_close() - Close the control session of the object
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPMAC object
++ *
++ * After this function is called, no further operations are
++ * allowed on the object without opening a new control session.
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpmac_close(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token);
++
++/**
++ * enum dpmac_link_type - DPMAC link type
++ * @DPMAC_LINK_TYPE_NONE: No link
++ * @DPMAC_LINK_TYPE_FIXED: Link is fixed type
++ * @DPMAC_LINK_TYPE_PHY: Link by PHY ID
++ * @DPMAC_LINK_TYPE_BACKPLANE: Backplane link type
++ */
++enum dpmac_link_type {
++ DPMAC_LINK_TYPE_NONE,
++ DPMAC_LINK_TYPE_FIXED,
++ DPMAC_LINK_TYPE_PHY,
++ DPMAC_LINK_TYPE_BACKPLANE
++};
++
++/**
++ * enum dpmac_eth_if - DPMAC Ethernet interface
++ * @DPMAC_ETH_IF_MII: MII interface
++ * @DPMAC_ETH_IF_RMII: RMII interface
++ * @DPMAC_ETH_IF_SMII: SMII interface
++ * @DPMAC_ETH_IF_GMII: GMII interface
++ * @DPMAC_ETH_IF_RGMII: RGMII interface
++ * @DPMAC_ETH_IF_SGMII: SGMII interface
++ * @DPMAC_ETH_IF_QSGMII: QSGMII interface
++ * @DPMAC_ETH_IF_XAUI: XAUI interface
++ * @DPMAC_ETH_IF_XFI: XFI interface
++ */
++enum dpmac_eth_if {
++ DPMAC_ETH_IF_MII,
++ DPMAC_ETH_IF_RMII,
++ DPMAC_ETH_IF_SMII,
++ DPMAC_ETH_IF_GMII,
++ DPMAC_ETH_IF_RGMII,
++ DPMAC_ETH_IF_SGMII,
++ DPMAC_ETH_IF_QSGMII,
++ DPMAC_ETH_IF_XAUI,
++ DPMAC_ETH_IF_XFI
++};
++
++/**
++ * struct dpmac_cfg - Structure representing DPMAC configuration
++ * @mac_id: Represents the Hardware MAC ID; in the case of multiple WRIOPs,
++ *		the MAC IDs are contiguous.
++ * For example: 2 WRIOPs, 16 MACs in each:
++ * MAC IDs for the 1st WRIOP: 1-16,
++ * MAC IDs for the 2nd WRIOP: 17-32.
++ */
++struct dpmac_cfg {
++ int mac_id;
++};
++
++/**
++ * dpmac_create() - Create the DPMAC object.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @cfg: Configuration structure
++ * @token: Returned token; use in subsequent API calls
++ *
++ * Create the DPMAC object, allocate required resources and
++ * perform required initialization.
++ *
++ * The object can be created either by declaring it in the
++ * DPL file, or by calling this function.
++ * This function returns a unique authentication token,
++ * associated with the specific object ID and the specific MC
++ * portal; this token must be used in all subsequent calls to
++ * this specific object. For objects that are created using the
++ * DPL file, call dpmac_open function to get an authentication
++ * token first.
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpmac_create(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ const struct dpmac_cfg *cfg,
++ uint16_t *token);
++
++/**
++ * dpmac_destroy() - Destroy the DPMAC object and release all its resources.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPMAC object
++ *
++ * Return: '0' on Success; error code otherwise.
++ */
++int dpmac_destroy(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token);
++
++/**
++ * DPMAC IRQ Index and Events
++ */
++
++/**
++ * IRQ index
++ */
++#define DPMAC_IRQ_INDEX 0
++/**
++ * IRQ event - indicates a link configuration request
++ */
++#define DPMAC_IRQ_EVENT_LINK_CFG_REQ 0x00000001
++/**
++ * IRQ event - Indicates that the link state changed
++ */
++#define DPMAC_IRQ_EVENT_LINK_CHANGED 0x00000002
++
++/**
++ * struct dpmac_irq_cfg - IRQ configuration
++ * @addr: Address that must be written to signal a message-based interrupt
++ * @val: Value to write into irq_addr address
++ * @irq_num: A user defined number associated with this IRQ
++ */
++struct dpmac_irq_cfg {
++ uint64_t addr;
++ uint32_t val;
++ int irq_num;
++};
++
++/**
++ * dpmac_set_irq() - Set IRQ information for the DPMAC to trigger an interrupt.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPMAC object
++ * @irq_index: Identifies the interrupt index to configure
++ * @irq_cfg: IRQ configuration
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpmac_set_irq(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ struct dpmac_irq_cfg *irq_cfg);
++
++/**
++ * dpmac_get_irq() - Get IRQ information from the DPMAC.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPMAC object
++ * @irq_index: The interrupt index to configure
++ * @type: Interrupt type: 0 represents message interrupt
++ * type (both irq_addr and irq_val are valid)
++ * @irq_cfg: IRQ attributes
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpmac_get_irq(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ int *type,
++ struct dpmac_irq_cfg *irq_cfg);
++
++/**
++ * dpmac_set_irq_enable() - Set overall interrupt state.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPMAC object
++ * @irq_index: The interrupt index to configure
++ * @en: Interrupt state - enable = 1, disable = 0
++ *
++ * Allows GPP software to control when interrupts are generated.
++ * Each interrupt can have up to 32 causes. The enable/disable controls the
++ * overall interrupt state; if the interrupt is disabled, none of the causes
++ * can assert the interrupt.
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpmac_set_irq_enable(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ uint8_t en);
++
++/**
++ * dpmac_get_irq_enable() - Get overall interrupt state
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPMAC object
++ * @irq_index: The interrupt index to configure
++ * @en: Returned interrupt state - enable = 1, disable = 0
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpmac_get_irq_enable(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ uint8_t *en);
++
++/**
++ * dpmac_set_irq_mask() - Set interrupt mask.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPMAC object
++ * @irq_index: The interrupt index to configure
++ * @mask: Event mask to trigger interrupt;
++ * each bit:
++ * 0 = ignore event
++ * 1 = consider event for asserting IRQ
++ *
++ * Every interrupt can have up to 32 causes and the interrupt model supports
++ * masking/unmasking each cause independently
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpmac_set_irq_mask(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ uint32_t mask);
++
++/**
++ * dpmac_get_irq_mask() - Get interrupt mask.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPMAC object
++ * @irq_index: The interrupt index to configure
++ * @mask: Returned event mask to trigger interrupt
++ *
++ * Every interrupt can have up to 32 causes and the interrupt model supports
++ * masking/unmasking each cause independently
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpmac_get_irq_mask(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ uint32_t *mask);
++
++/**
++ * dpmac_get_irq_status() - Get the current status of any pending interrupts.
++ *
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPMAC object
++ * @irq_index: The interrupt index to configure
++ * @status: Returned interrupts status - one bit per cause:
++ * 0 = no interrupt pending
++ * 1 = interrupt pending
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpmac_get_irq_status(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ uint32_t *status);
++
++/**
++ * dpmac_clear_irq_status() - Clear a pending interrupt's status
++ *
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPMAC object
++ * @irq_index: The interrupt index to configure
++ * @status: Bits to clear (W1C) - one bit per cause:
++ * 0 = don't change
++ * 1 = clear status bit
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpmac_clear_irq_status(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ uint32_t status);
++
++/**
++ * struct dpmac_attr - Structure representing DPMAC attributes
++ * @id: DPMAC object ID
++ * @phy_id: PHY ID
++ * @link_type: link type
++ * @eth_if: Ethernet interface
++ * @max_rate: Maximum supported rate - in Mbps
++ * @version: DPMAC version
++ */
++struct dpmac_attr {
++ int id;
++ int phy_id;
++ enum dpmac_link_type link_type;
++ enum dpmac_eth_if eth_if;
++ uint32_t max_rate;
++ /**
++ * struct version - Structure representing DPMAC version
++ * @major: DPMAC major version
++ * @minor: DPMAC minor version
++ */
++ struct {
++ uint16_t major;
++ uint16_t minor;
++ } version;
++};
++
++/**
++ * dpmac_get_attributes - Retrieve DPMAC attributes.
++ *
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPMAC object
++ * @attr: Returned object's attributes
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpmac_get_attributes(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ struct dpmac_attr *attr);
++
++/**
++ * struct dpmac_mdio_cfg - DPMAC MDIO read/write parameters
++ * @phy_addr: MDIO device address
++ * @reg: Address of the register within the Clause 45 PHY device that is
++ *	 read from or written to
++ * @data: Data read/write from/to MDIO
++ */
++struct dpmac_mdio_cfg {
++ uint8_t phy_addr;
++ uint8_t reg;
++ uint16_t data;
++};
++
++/**
++ * dpmac_mdio_read() - Perform MDIO read transaction
++ * @mc_io: Pointer to opaque I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPMAC object
++ * @cfg: Structure with MDIO transaction parameters
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpmac_mdio_read(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ struct dpmac_mdio_cfg *cfg);
++
++/**
++ * dpmac_mdio_write() - Perform MDIO write transaction
++ * @mc_io: Pointer to opaque I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPMAC object
++ * @cfg: Structure with MDIO transaction parameters
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpmac_mdio_write(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ struct dpmac_mdio_cfg *cfg);
++
++/**
++ * DPMAC link configuration/state options
++ */
++
++/**
++ * Enable auto-negotiation
++ */
++#define DPMAC_LINK_OPT_AUTONEG 0x0000000000000001ULL
++/**
++ * Enable half-duplex mode
++ */
++#define DPMAC_LINK_OPT_HALF_DUPLEX 0x0000000000000002ULL
++/**
++ * Enable pause frames
++ */
++#define DPMAC_LINK_OPT_PAUSE 0x0000000000000004ULL
++/**
++ * Enable asymmetric pause frames
++ */
++#define DPMAC_LINK_OPT_ASYM_PAUSE 0x0000000000000008ULL
++
++/**
++ * struct dpmac_link_cfg - Structure representing DPMAC link configuration
++ * @rate: Link's rate - in Mbps
++ * @options: Enable/Disable DPMAC link cfg features (bitmap)
++ */
++struct dpmac_link_cfg {
++ uint32_t rate;
++ uint64_t options;
++};
++
++/**
++ * dpmac_get_link_cfg() - Get Ethernet link configuration
++ * @mc_io: Pointer to opaque I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPMAC object
++ * @cfg: Returned structure with the link configuration
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpmac_get_link_cfg(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ struct dpmac_link_cfg *cfg);
++
++/**
++ * struct dpmac_link_state - DPMAC link configuration request
++ * @rate: Rate in Mbps
++ * @options: Enable/Disable DPMAC link cfg features (bitmap)
++ * @up: Link state
++ */
++struct dpmac_link_state {
++ uint32_t rate;
++ uint64_t options;
++ int up;
++};
++
++/**
++ * dpmac_set_link_state() - Set the Ethernet link status
++ * @mc_io: Pointer to opaque I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPMAC object
++ * @link_state: Link state configuration
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpmac_set_link_state(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ struct dpmac_link_state *link_state);
++
++/**
++ * enum dpmac_counter - DPMAC counter types
++ * @DPMAC_CNT_ING_FRAME_64: counts 64-byte frames, good or bad.
++ * @DPMAC_CNT_ING_FRAME_127: counts 65- to 127-byte frames, good or bad.
++ * @DPMAC_CNT_ING_FRAME_255: counts 128- to 255-byte frames, good or bad.
++ * @DPMAC_CNT_ING_FRAME_511: counts 256- to 511-byte frames, good or bad.
++ * @DPMAC_CNT_ING_FRAME_1023: counts 512- to 1023-byte frames, good or bad.
++ * @DPMAC_CNT_ING_FRAME_1518: counts 1024- to 1518-byte frames, good or bad.
++ * @DPMAC_CNT_ING_FRAME_1519_MAX: counts frames of 1519 bytes and larger
++ *				    (up to the maximum frame length specified),
++ *				    good or bad.
++ * @DPMAC_CNT_ING_FRAG: counts received frames shorter than 64 bytes with a
++ *			  wrong CRC
++ * @DPMAC_CNT_ING_JABBER: counts frames longer than the maximum frame length
++ *			    specified, with a bad frame check sequence.
++ * @DPMAC_CNT_ING_FRAME_DISCARD: counts frames dropped due to internal errors.
++ *				   Occurs when a receive FIFO overflows.
++ *				   Also includes frames truncated as a result of
++ *				   the receive FIFO overflow.
++ * @DPMAC_CNT_ING_ALIGN_ERR: counts frames with an alignment error
++ *			       (optionally used for a wrong SFD).
++ * @DPMAC_CNT_EGR_UNDERSIZED: counts transmitted frames that were less than 64
++ *				bytes long with a good CRC.
++ * @DPMAC_CNT_ING_OVERSIZED: counts frames longer than the maximum frame length
++ *				specified, with a good frame check sequence.
++ * @DPMAC_CNT_ING_VALID_PAUSE_FRAME: counts valid pause frames (regular and PFC)
++ * @DPMAC_CNT_EGR_VALID_PAUSE_FRAME: counts valid pause frames transmitted
++ * (regular and PFC).
++ * @DPMAC_CNT_ING_BYTE: counts bytes received except preamble for all valid
++ * frames and valid pause frames.
++ * @DPMAC_CNT_ING_MCAST_FRAME: counts received multicast frames.
++ * @DPMAC_CNT_ING_BCAST_FRAME: counts received broadcast frames.
++ * @DPMAC_CNT_ING_ALL_FRAME: counts all frames received, good or bad.
++ * @DPMAC_CNT_ING_UCAST_FRAME: counts received unicast frames.
++ * @DPMAC_CNT_ING_ERR_FRAME: counts frames received with an error
++ * (except for undersized/fragment frame).
++ * @DPMAC_CNT_EGR_BYTE: counts bytes transmitted except preamble for all valid
++ * frames and valid pause frames transmitted.
++ * @DPMAC_CNT_EGR_MCAST_FRAME: counts transmitted multicast frames.
++ * @DPMAC_CNT_EGR_BCAST_FRAME: counts transmitted broadcast frames.
++ * @DPMAC_CNT_EGR_UCAST_FRAME: counts transmitted unicast frames.
++ * @DPMAC_CNT_EGR_ERR_FRAME: counts frames transmitted with an error.
++ * @DPMAC_CNT_ING_GOOD_FRAME: counts frames received without error, including
++ * pause frames.
++ * @DPMAC_CNT_ENG_GOOD_FRAME: counts frames transmitted without error, including
++ * pause frames.
++ */
++enum dpmac_counter {
++ DPMAC_CNT_ING_FRAME_64,
++ DPMAC_CNT_ING_FRAME_127,
++ DPMAC_CNT_ING_FRAME_255,
++ DPMAC_CNT_ING_FRAME_511,
++ DPMAC_CNT_ING_FRAME_1023,
++ DPMAC_CNT_ING_FRAME_1518,
++ DPMAC_CNT_ING_FRAME_1519_MAX,
++ DPMAC_CNT_ING_FRAG,
++ DPMAC_CNT_ING_JABBER,
++ DPMAC_CNT_ING_FRAME_DISCARD,
++ DPMAC_CNT_ING_ALIGN_ERR,
++ DPMAC_CNT_EGR_UNDERSIZED,
++ DPMAC_CNT_ING_OVERSIZED,
++ DPMAC_CNT_ING_VALID_PAUSE_FRAME,
++ DPMAC_CNT_EGR_VALID_PAUSE_FRAME,
++ DPMAC_CNT_ING_BYTE,
++ DPMAC_CNT_ING_MCAST_FRAME,
++ DPMAC_CNT_ING_BCAST_FRAME,
++ DPMAC_CNT_ING_ALL_FRAME,
++ DPMAC_CNT_ING_UCAST_FRAME,
++ DPMAC_CNT_ING_ERR_FRAME,
++ DPMAC_CNT_EGR_BYTE,
++ DPMAC_CNT_EGR_MCAST_FRAME,
++ DPMAC_CNT_EGR_BCAST_FRAME,
++ DPMAC_CNT_EGR_UCAST_FRAME,
++ DPMAC_CNT_EGR_ERR_FRAME,
++ DPMAC_CNT_ING_GOOD_FRAME,
++ DPMAC_CNT_ENG_GOOD_FRAME
++};
++
++/**
++ * dpmac_get_counter() - Read a specific DPMAC counter
++ * @mc_io: Pointer to opaque I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPMAC object
++ * @type: The requested counter
++ * @counter: Returned counter value
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpmac_get_counter(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ enum dpmac_counter type,
++ uint64_t *counter);
++
++#endif /* __FSL_DPMAC_H */
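
As a usage sketch for the DPMAC control API declared above: open a session, read one counter, and close. The struct fsl_mc_io portal is assumed to come from the MC library's fsl_mc_sys.h and to be initialized by the caller; passing 0 as cmd_flags (no MC_CMD_FLAG_ options) and the helper name itself are illustrative assumptions, not part of the patch's own code.

#include <stdint.h>
#include <fsl_mc_sys.h>		/* struct fsl_mc_io (assumed location) */
#include <fsl_dpmac.h>

/*
 * Illustrative sketch: read the ingress byte counter of one DPMAC.
 * 'mc_io' must be an MC portal already initialized by the caller;
 * cmd_flags of 0 (no MC_CMD_FLAG_ options) is an assumption.
 */
static int read_dpmac_rx_bytes(struct fsl_mc_io *mc_io, int dpmac_id,
			       uint64_t *rx_bytes)
{
	uint16_t token;
	int err;

	err = dpmac_open(mc_io, 0, dpmac_id, &token);
	if (err)
		return err;

	err = dpmac_get_counter(mc_io, 0, token, DPMAC_CNT_ING_BYTE, rx_bytes);

	/* Always release the control session, even if the read failed. */
	dpmac_close(mc_io, 0, token);
	return err;
}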
+diff --git a/drivers/net/dpaa2/mc/fsl_dpmac_cmd.h b/drivers/net/dpaa2/mc/fsl_dpmac_cmd.h
+new file mode 100644
+index 0000000..dc00590
+--- /dev/null
++++ b/drivers/net/dpaa2/mc/fsl_dpmac_cmd.h
+@@ -0,0 +1,195 @@
++/* Copyright 2013-2015 Freescale Semiconductor Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of the above-listed copyright holders nor the
++ * names of any contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
++ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
++ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
++ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
++ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
++ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
++ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
++ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
++ * POSSIBILITY OF SUCH DAMAGE.
++ */
++#ifndef _FSL_DPMAC_CMD_H
++#define _FSL_DPMAC_CMD_H
++
++/* DPMAC Version */
++#define DPMAC_VER_MAJOR 3
++#define DPMAC_VER_MINOR 2
++
++/* Command IDs */
++#define DPMAC_CMDID_CLOSE 0x800
++#define DPMAC_CMDID_OPEN 0x80c
++#define DPMAC_CMDID_CREATE 0x90c
++#define DPMAC_CMDID_DESTROY 0x900
++
++#define DPMAC_CMDID_GET_ATTR 0x004
++#define DPMAC_CMDID_RESET 0x005
++
++#define DPMAC_CMDID_SET_IRQ 0x010
++#define DPMAC_CMDID_GET_IRQ 0x011
++#define DPMAC_CMDID_SET_IRQ_ENABLE 0x012
++#define DPMAC_CMDID_GET_IRQ_ENABLE 0x013
++#define DPMAC_CMDID_SET_IRQ_MASK 0x014
++#define DPMAC_CMDID_GET_IRQ_MASK 0x015
++#define DPMAC_CMDID_GET_IRQ_STATUS 0x016
++#define DPMAC_CMDID_CLEAR_IRQ_STATUS 0x017
++
++#define DPMAC_CMDID_MDIO_READ 0x0c0
++#define DPMAC_CMDID_MDIO_WRITE 0x0c1
++#define DPMAC_CMDID_GET_LINK_CFG 0x0c2
++#define DPMAC_CMDID_SET_LINK_STATE 0x0c3
++#define DPMAC_CMDID_GET_COUNTER 0x0c4
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPMAC_CMD_CREATE(cmd, cfg) \
++ MC_CMD_OP(cmd, 0, 0, 32, int, cfg->mac_id)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPMAC_CMD_OPEN(cmd, dpmac_id) \
++ MC_CMD_OP(cmd, 0, 0, 32, int, dpmac_id)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPMAC_CMD_SET_IRQ(cmd, irq_index, irq_cfg) \
++do { \
++ MC_CMD_OP(cmd, 0, 0, 8, uint8_t, irq_index);\
++ MC_CMD_OP(cmd, 0, 32, 32, uint32_t, irq_cfg->val);\
++ MC_CMD_OP(cmd, 1, 0, 64, uint64_t, irq_cfg->addr); \
++ MC_CMD_OP(cmd, 2, 0, 32, int, irq_cfg->irq_num); \
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPMAC_CMD_GET_IRQ(cmd, irq_index) \
++ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPMAC_RSP_GET_IRQ(cmd, type, irq_cfg) \
++do { \
++ MC_RSP_OP(cmd, 0, 0, 32, uint32_t, irq_cfg->val); \
++ MC_RSP_OP(cmd, 1, 0, 64, uint64_t, irq_cfg->addr); \
++ MC_RSP_OP(cmd, 2, 0, 32, int, irq_cfg->irq_num); \
++ MC_RSP_OP(cmd, 2, 32, 32, int, type); \
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPMAC_CMD_SET_IRQ_ENABLE(cmd, irq_index, en) \
++do { \
++ MC_CMD_OP(cmd, 0, 0, 8, uint8_t, en); \
++ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPMAC_CMD_GET_IRQ_ENABLE(cmd, irq_index) \
++ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPMAC_RSP_GET_IRQ_ENABLE(cmd, en) \
++ MC_RSP_OP(cmd, 0, 0, 8, uint8_t, en)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPMAC_CMD_SET_IRQ_MASK(cmd, irq_index, mask) \
++do { \
++ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, mask);\
++ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPMAC_CMD_GET_IRQ_MASK(cmd, irq_index) \
++ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPMAC_RSP_GET_IRQ_MASK(cmd, mask) \
++ MC_RSP_OP(cmd, 0, 0, 32, uint32_t, mask)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPMAC_CMD_GET_IRQ_STATUS(cmd, irq_index, status) \
++do { \
++ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, status);\
++ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPMAC_RSP_GET_IRQ_STATUS(cmd, status) \
++ MC_RSP_OP(cmd, 0, 0, 32, uint32_t, status)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPMAC_CMD_CLEAR_IRQ_STATUS(cmd, irq_index, status) \
++do { \
++ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, status); \
++ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPMAC_RSP_GET_ATTRIBUTES(cmd, attr) \
++do { \
++ MC_RSP_OP(cmd, 0, 0, 32, int, attr->phy_id);\
++ MC_RSP_OP(cmd, 0, 32, 32, int, attr->id);\
++ MC_RSP_OP(cmd, 1, 0, 16, uint16_t, attr->version.major);\
++ MC_RSP_OP(cmd, 1, 16, 16, uint16_t, attr->version.minor);\
++ MC_RSP_OP(cmd, 1, 32, 8, enum dpmac_link_type, attr->link_type);\
++ MC_RSP_OP(cmd, 1, 40, 8, enum dpmac_eth_if, attr->eth_if);\
++ MC_RSP_OP(cmd, 2, 0, 32, uint32_t, attr->max_rate);\
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPMAC_CMD_MDIO_READ(cmd, cfg) \
++do { \
++ MC_CMD_OP(cmd, 0, 0, 8, uint8_t, cfg->phy_addr); \
++ MC_CMD_OP(cmd, 0, 8, 8, uint8_t, cfg->reg); \
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPMAC_RSP_MDIO_READ(cmd, data) \
++ MC_RSP_OP(cmd, 0, 16, 16, uint16_t, data)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPMAC_CMD_MDIO_WRITE(cmd, cfg) \
++do { \
++ MC_CMD_OP(cmd, 0, 0, 8, uint8_t, cfg->phy_addr); \
++ MC_CMD_OP(cmd, 0, 8, 8, uint8_t, cfg->reg); \
++ MC_CMD_OP(cmd, 0, 16, 16, uint16_t, cfg->data); \
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPMAC_RSP_GET_LINK_CFG(cmd, cfg) \
++do { \
++ MC_RSP_OP(cmd, 0, 0, 64, uint64_t, cfg->options); \
++ MC_RSP_OP(cmd, 1, 0, 32, uint32_t, cfg->rate); \
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPMAC_CMD_SET_LINK_STATE(cmd, cfg) \
++do { \
++ MC_CMD_OP(cmd, 0, 0, 64, uint64_t, cfg->options); \
++ MC_CMD_OP(cmd, 1, 0, 32, uint32_t, cfg->rate); \
++ MC_CMD_OP(cmd, 2, 0, 1, int, cfg->up); \
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPMAC_CMD_GET_COUNTER(cmd, type) \
++ MC_CMD_OP(cmd, 0, 0, 8, enum dpmac_counter, type)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPMAC_RSP_GET_COUNTER(cmd, counter) \
++ MC_RSP_OP(cmd, 1, 0, 64, uint64_t, counter)
++
++#endif /* _FSL_DPMAC_CMD_H */
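
The MC_CMD_OP/MC_RSP_OP tables above are consumed by the dpmac.c wrappers added elsewhere in this patch. Below is a hedged sketch of that pattern for the counter query; struct mc_command, mc_encode_cmd_header() and mc_send_command() are assumed to be provided by the MC command/portal headers, and the shipped implementation may differ in detail.

#include <stdint.h>
#include <fsl_mc_sys.h>		/* struct fsl_mc_io, mc_send_command() (assumed) */
#include <fsl_mc_cmd.h>		/* struct mc_command, MC_CMD_OP/MC_RSP_OP (assumed) */
#include <fsl_dpmac.h>
#include <fsl_dpmac_cmd.h>

/*
 * Hedged sketch of how the encode/decode macros above are typically used;
 * mc_encode_cmd_header() is assumed from the MC command header and the
 * dpmac.c shipped in this patch may differ in detail.
 */
static int dpmac_get_counter_sketch(struct fsl_mc_io *mc_io,
				    uint32_t cmd_flags,
				    uint16_t token,
				    enum dpmac_counter type,
				    uint64_t *counter)
{
	struct mc_command cmd = { 0 };
	int err;

	/* Build the request header (command ID, flags, token) and payload. */
	cmd.header = mc_encode_cmd_header(DPMAC_CMDID_GET_COUNTER,
					  cmd_flags, token);
	DPMAC_CMD_GET_COUNTER(cmd, type);

	/* Send the command over the MC portal and wait for the response. */
	err = mc_send_command(mc_io, &cmd);
	if (err)
		return err;

	/* Decode the 64-bit counter from the response parameters. */
	DPMAC_RSP_GET_COUNTER(cmd, *counter);

	return 0;
}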
+diff --git a/drivers/net/dpaa2/mc/fsl_dpmcp.h b/drivers/net/dpaa2/mc/fsl_dpmcp.h
+new file mode 100644
+index 0000000..80f238e
+--- /dev/null
++++ b/drivers/net/dpaa2/mc/fsl_dpmcp.h
+@@ -0,0 +1,332 @@
++/* Copyright 2013-2015 Freescale Semiconductor Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of the above-listed copyright holders nor the
++ * names of any contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
++ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
++ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
++ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
++ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
++ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
++ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
++ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
++ * POSSIBILITY OF SUCH DAMAGE.
++ */
++#ifndef __FSL_DPMCP_H
++#define __FSL_DPMCP_H
++
++/* Data Path Management Command Portal API
++ * Contains initialization APIs and runtime control APIs for DPMCP
++ */
++
++struct fsl_mc_io;
++
++/**
++ * dpmcp_open() - Open a control session for the specified object.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @dpmcp_id: DPMCP unique ID
++ * @token: Returned token; use in subsequent API calls
++ *
++ * This function can be used to open a control session for an
++ * already created object; an object may have been declared in
++ * the DPL or by calling the dpmcp_create function.
++ * This function returns a unique authentication token,
++ * associated with the specific object ID and the specific MC
++ * portal; this token must be used in all subsequent commands for
++ * this specific object
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpmcp_open(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ int dpmcp_id,
++ uint16_t *token);
++
++/**
++ * Get portal ID from pool
++ */
++#define DPMCP_GET_PORTAL_ID_FROM_POOL (-1)
++
++/**
++ * dpmcp_close() - Close the control session of the object
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPMCP object
++ *
++ * After this function is called, no further operations are
++ * allowed on the object without opening a new control session.
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpmcp_close(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token);
++
++/**
++ * struct dpmcp_cfg - Structure representing DPMCP configuration
++ * @portal_id: Portal ID; 'DPMCP_GET_PORTAL_ID_FROM_POOL' to get the portal ID
++ * from pool
++ */
++struct dpmcp_cfg {
++ int portal_id;
++};
++
++/**
++ * dpmcp_create() - Create the DPMCP object.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @cfg: Configuration structure
++ * @token: Returned token; use in subsequent API calls
++ *
++ * Create the DPMCP object, allocate required resources and
++ * perform required initialization.
++ *
++ * The object can be created either by declaring it in the
++ * DPL file, or by calling this function.
++ * This function returns a unique authentication token,
++ * associated with the specific object ID and the specific MC
++ * portal; this token must be used in all subsequent calls to
++ * this specific object. For objects that are created using the
++ * DPL file, call dpmcp_open function to get an authentication
++ * token first.
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpmcp_create(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ const struct dpmcp_cfg *cfg,
++ uint16_t *token);
++
++/**
++ * dpmcp_destroy() - Destroy the DPMCP object and release all its resources.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPMCP object
++ *
++ * Return: '0' on Success; error code otherwise.
++ */
++int dpmcp_destroy(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token);
++
++/**
++ * dpmcp_reset() - Reset the DPMCP, returns the object to initial state.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPMCP object
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpmcp_reset(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token);
++
++/**
++ * IRQ
++ */
++
++/**
++ * IRQ Index
++ */
++#define DPMCP_IRQ_INDEX 0
++/**
++ * IRQ event - Indicates that the pending MC command has completed
++ */
++#define DPMCP_IRQ_EVENT_CMD_DONE 0x00000001
++
++/**
++ * struct dpmcp_irq_cfg - IRQ configuration
++ * @addr: Address that must be written to signal a message-based interrupt
++ * @val: Value to write into irq_addr address
++ * @irq_num: A user defined number associated with this IRQ
++ */
++struct dpmcp_irq_cfg {
++ uint64_t addr;
++ uint32_t val;
++ int irq_num;
++};
++
++/**
++ * dpmcp_set_irq() - Set IRQ information for the DPMCP to trigger an interrupt.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPMCP object
++ * @irq_index: Identifies the interrupt index to configure
++ * @irq_cfg: IRQ configuration
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpmcp_set_irq(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ struct dpmcp_irq_cfg *irq_cfg);
++
++/**
++ * dpmcp_get_irq() - Get IRQ information from the DPMCP.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPMCP object
++ * @irq_index: The interrupt index to configure
++ * @type: Interrupt type: 0 represents message interrupt
++ * type (both irq_addr and irq_val are valid)
++ * @irq_cfg: IRQ attributes
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpmcp_get_irq(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ int *type,
++ struct dpmcp_irq_cfg *irq_cfg);
++
++/**
++ * dpmcp_set_irq_enable() - Set overall interrupt state.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPMCP object
++ * @irq_index: The interrupt index to configure
++ * @en: Interrupt state - enable = 1, disable = 0
++ *
++ * Allows GPP software to control when interrupts are generated.
++ * Each interrupt can have up to 32 causes. The enable/disable controls the
++ * overall interrupt state; if the interrupt is disabled, none of the causes
++ * can assert the interrupt.
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpmcp_set_irq_enable(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ uint8_t en);
++
++/**
++ * dpmcp_get_irq_enable() - Get overall interrupt state
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPMCP object
++ * @irq_index: The interrupt index to configure
++ * @en: Returned interrupt state - enable = 1, disable = 0
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpmcp_get_irq_enable(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ uint8_t *en);
++
++/**
++ * dpmcp_set_irq_mask() - Set interrupt mask.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPMCP object
++ * @irq_index: The interrupt index to configure
++ * @mask: Event mask to trigger interrupt;
++ * each bit:
++ * 0 = ignore event
++ * 1 = consider event for asserting IRQ
++ *
++ * Every interrupt can have up to 32 causes and the interrupt model supports
++ * masking/unmasking each cause independently
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpmcp_set_irq_mask(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ uint32_t mask);
++
++/**
++ * dpmcp_get_irq_mask() - Get interrupt mask.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPMCP object
++ * @irq_index: The interrupt index to configure
++ * @mask: Returned event mask to trigger interrupt
++ *
++ * Every interrupt can have up to 32 causes and the interrupt model supports
++ * masking/unmasking each cause independently
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpmcp_get_irq_mask(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ uint32_t *mask);
++
++/**
++ * dpmcp_get_irq_status() - Get the current status of any pending interrupts.
++ *
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPMCP object
++ * @irq_index: The interrupt index to configure
++ * @status: Returned interrupts status - one bit per cause:
++ * 0 = no interrupt pending
++ * 1 = interrupt pending
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpmcp_get_irq_status(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ uint32_t *status);
++
++/**
++ * struct dpmcp_attr - Structure representing DPMCP attributes
++ * @id: DPMCP object ID
++ * @version: DPMCP version
++ */
++struct dpmcp_attr {
++ int id;
++ /**
++ * struct version - Structure representing DPMCP version
++ * @major: DPMCP major version
++ * @minor: DPMCP minor version
++ */
++ struct {
++ uint16_t major;
++ uint16_t minor;
++ } version;
++};
++
++/**
++ * dpmcp_get_attributes - Retrieve DPMCP attributes.
++ *
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPMCP object
++ * @attr: Returned object's attributes
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpmcp_get_attributes(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ struct dpmcp_attr *attr);
++
++#endif /* __FSL_DPMCP_H */
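
A short, illustrative sketch of driving the DPMCP API above: open the object, fetch its attributes, reset it to its initial state, and close the control session. As before, an initialized struct fsl_mc_io portal and cmd_flags of 0 are assumptions, and the helper name is hypothetical.

#include <stdint.h>
#include <fsl_mc_sys.h>		/* struct fsl_mc_io (assumed location) */
#include <fsl_dpmcp.h>

/*
 * Illustrative sketch: open a DPMCP, read its attributes, reset it to its
 * initial state and close the control session. An initialized MC portal
 * and cmd_flags of 0 are assumptions.
 */
static int reset_dpmcp(struct fsl_mc_io *mc_io, int dpmcp_id)
{
	struct dpmcp_attr attr;
	uint16_t token;
	int err;

	err = dpmcp_open(mc_io, 0, dpmcp_id, &token);
	if (err)
		return err;

	err = dpmcp_get_attributes(mc_io, 0, token, &attr);
	if (!err)
		err = dpmcp_reset(mc_io, 0, token);

	dpmcp_close(mc_io, 0, token);
	return err;
}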
+diff --git a/drivers/net/dpaa2/mc/fsl_dpmcp_cmd.h b/drivers/net/dpaa2/mc/fsl_dpmcp_cmd.h
+new file mode 100644
+index 0000000..8f710bd
+--- /dev/null
++++ b/drivers/net/dpaa2/mc/fsl_dpmcp_cmd.h
+@@ -0,0 +1,135 @@
++/* Copyright 2013-2015 Freescale Semiconductor Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of the above-listed copyright holders nor the
++ * names of any contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
++ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
++ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
++ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
++ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
++ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
++ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
++ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
++ * POSSIBILITY OF SUCH DAMAGE.
++ */
++#ifndef _FSL_DPMCP_CMD_H
++#define _FSL_DPMCP_CMD_H
++
++/* DPMCP Version */
++#define DPMCP_VER_MAJOR 3
++#define DPMCP_VER_MINOR 0
++
++/* Command IDs */
++#define DPMCP_CMDID_CLOSE 0x800
++#define DPMCP_CMDID_OPEN 0x80b
++#define DPMCP_CMDID_CREATE 0x90b
++#define DPMCP_CMDID_DESTROY 0x900
++
++#define DPMCP_CMDID_GET_ATTR 0x004
++#define DPMCP_CMDID_RESET 0x005
++
++#define DPMCP_CMDID_SET_IRQ 0x010
++#define DPMCP_CMDID_GET_IRQ 0x011
++#define DPMCP_CMDID_SET_IRQ_ENABLE 0x012
++#define DPMCP_CMDID_GET_IRQ_ENABLE 0x013
++#define DPMCP_CMDID_SET_IRQ_MASK 0x014
++#define DPMCP_CMDID_GET_IRQ_MASK 0x015
++#define DPMCP_CMDID_GET_IRQ_STATUS 0x016
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPMCP_CMD_OPEN(cmd, dpmcp_id) \
++ MC_CMD_OP(cmd, 0, 0, 32, int, dpmcp_id)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPMCP_CMD_CREATE(cmd, cfg) \
++ MC_CMD_OP(cmd, 0, 0, 32, int, cfg->portal_id)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPMCP_CMD_SET_IRQ(cmd, irq_index, irq_cfg) \
++do { \
++ MC_CMD_OP(cmd, 0, 0, 8, uint8_t, irq_index);\
++ MC_CMD_OP(cmd, 0, 32, 32, uint32_t, irq_cfg->val);\
++ MC_CMD_OP(cmd, 1, 0, 64, uint64_t, irq_cfg->addr); \
++ MC_CMD_OP(cmd, 2, 0, 32, int, irq_cfg->irq_num); \
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPMCP_CMD_GET_IRQ(cmd, irq_index) \
++ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPMCP_RSP_GET_IRQ(cmd, type, irq_cfg) \
++do { \
++ MC_RSP_OP(cmd, 0, 0, 32, uint32_t, irq_cfg->val); \
++ MC_RSP_OP(cmd, 1, 0, 64, uint64_t, irq_cfg->addr); \
++ MC_RSP_OP(cmd, 2, 0, 32, int, irq_cfg->irq_num); \
++ MC_RSP_OP(cmd, 2, 32, 32, int, type); \
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPMCP_CMD_SET_IRQ_ENABLE(cmd, irq_index, en) \
++do { \
++ MC_CMD_OP(cmd, 0, 0, 8, uint8_t, en); \
++ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPMCP_CMD_GET_IRQ_ENABLE(cmd, irq_index) \
++ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPMCP_RSP_GET_IRQ_ENABLE(cmd, en) \
++ MC_RSP_OP(cmd, 0, 0, 8, uint8_t, en)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPMCP_CMD_SET_IRQ_MASK(cmd, irq_index, mask) \
++do { \
++ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, mask);\
++ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPMCP_CMD_GET_IRQ_MASK(cmd, irq_index) \
++ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPMCP_RSP_GET_IRQ_MASK(cmd, mask) \
++ MC_RSP_OP(cmd, 0, 0, 32, uint32_t, mask)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPMCP_CMD_GET_IRQ_STATUS(cmd, irq_index, status) \
++do { \
++ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, status);\
++ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPMCP_RSP_GET_IRQ_STATUS(cmd, status) \
++ MC_RSP_OP(cmd, 0, 0, 32, uint32_t, status)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPMCP_RSP_GET_ATTRIBUTES(cmd, attr) \
++do { \
++ MC_RSP_OP(cmd, 0, 32, 32, int, attr->id);\
++ MC_RSP_OP(cmd, 1, 0, 16, uint16_t, attr->version.major);\
++ MC_RSP_OP(cmd, 1, 16, 16, uint16_t, attr->version.minor);\
++} while (0)
++
++#endif /* _FSL_DPMCP_CMD_H */
+diff --git a/drivers/net/dpaa2/mc/fsl_dpmng.h b/drivers/net/dpaa2/mc/fsl_dpmng.h
+new file mode 100644
+index 0000000..4468dea
+--- /dev/null
++++ b/drivers/net/dpaa2/mc/fsl_dpmng.h
+@@ -0,0 +1,74 @@
++/* Copyright 2013-2015 Freescale Semiconductor Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of the above-listed copyright holders nor the
++ * names of any contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
++ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
++ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
++ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
++ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
++ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
++ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
++ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
++ * POSSIBILITY OF SUCH DAMAGE.
++ */
++#ifndef __FSL_DPMNG_H
++#define __FSL_DPMNG_H
++
++/* Management Complex General API
++ * Contains general API for the Management Complex firmware
++ */
++
++struct fsl_mc_io;
++
++/**
++ * Management Complex firmware version information
++ */
++#define MC_VER_MAJOR 9
++#define MC_VER_MINOR 0
++
++/**
++ * struct mc_version - Management Complex firmware version information
++ * @major: Major version number: incremented on API compatibility changes
++ * @minor: Minor version number: incremented on API additions (that are
++ * backward compatible); reset when major version is incremented
++ * @revision: Internal revision number: incremented on implementation changes
++ * and/or bug fixes that have no impact on API
++ */
++struct mc_version {
++ uint32_t major;
++ uint32_t minor;
++ uint32_t revision;
++};
++
++/**
++ * mc_get_version() - Retrieves the Management Complex firmware
++ * version information
++ * @mc_io: Pointer to opaque I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @mc_ver_info: Returned version information structure
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int mc_get_version(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ struct mc_version *mc_ver_info);
++
++#endif /* __FSL_DPMNG_H */
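
A minimal sketch of using mc_get_version() above to verify that the running MC firmware matches the API version this header was built against; the portal setup, the cmd_flags value of 0, and the strictness of the version check are assumptions.

#include <fsl_mc_sys.h>		/* struct fsl_mc_io (assumed location) */
#include <fsl_dpmng.h>

/*
 * Illustrative sketch: query the MC firmware version and check it against
 * the MC_VER_MAJOR value this header was built for. Treating any major
 * mismatch as fatal is an assumption; a real driver may be more lenient.
 */
static int check_mc_firmware(struct fsl_mc_io *mc_io)
{
	struct mc_version ver;
	int err;

	err = mc_get_version(mc_io, 0, &ver);
	if (err)
		return err;

	/* A major version change signals an incompatible MC API. */
	if (ver.major != MC_VER_MAJOR)
		return -1;

	return 0;
}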
+diff --git a/drivers/net/dpaa2/mc/fsl_dpmng_cmd.h b/drivers/net/dpaa2/mc/fsl_dpmng_cmd.h
+new file mode 100644
+index 0000000..c34ca3a
+--- /dev/null
++++ b/drivers/net/dpaa2/mc/fsl_dpmng_cmd.h
+@@ -0,0 +1,46 @@
++/* Copyright 2013-2015 Freescale Semiconductor Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of the above-listed copyright holders nor the
++ * names of any contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
++ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
++ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
++ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
++ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
++ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
++ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
++ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
++ * POSSIBILITY OF SUCH DAMAGE.
++ */
++#ifndef __FSL_DPMNG_CMD_H
++#define __FSL_DPMNG_CMD_H
++
++/* Command IDs */
++#define DPMNG_CMDID_GET_VERSION 0x831
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPMNG_RSP_GET_VERSION(cmd, mc_ver_info) \
++do { \
++ MC_RSP_OP(cmd, 0, 0, 32, uint32_t, mc_ver_info->revision); \
++ MC_RSP_OP(cmd, 0, 32, 32, uint32_t, mc_ver_info->major); \
++ MC_RSP_OP(cmd, 1, 0, 32, uint32_t, mc_ver_info->minor); \
++} while (0)
++
++#endif /* __FSL_DPMNG_CMD_H */
+diff --git a/drivers/net/dpaa2/mc/fsl_dpni.h b/drivers/net/dpaa2/mc/fsl_dpni.h
+new file mode 100644
+index 0000000..c820086
+--- /dev/null
++++ b/drivers/net/dpaa2/mc/fsl_dpni.h
+@@ -0,0 +1,2581 @@
++/* Copyright 2013-2015 Freescale Semiconductor Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of the above-listed copyright holders nor the
++ * names of any contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
++ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
++ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
++ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
++ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
++ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
++ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
++ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
++ * POSSIBILITY OF SUCH DAMAGE.
++ */
++#ifndef __FSL_DPNI_H
++#define __FSL_DPNI_H
++
++#include <fsl_dpkg.h>
++
++struct fsl_mc_io;
++
++/**
++ * Data Path Network Interface API
++ * Contains initialization APIs and runtime control APIs for DPNI
++ */
++
++/** General DPNI macros */
++
++/**
++ * Maximum number of traffic classes
++ */
++#define DPNI_MAX_TC 8
++/**
++ * Maximum number of buffer pools per DPNI
++ */
++#define DPNI_MAX_DPBP 8
++/**
++ * Maximum number of storage-profiles per DPNI
++ */
++#define DPNI_MAX_SP 2
++
++/**
++ * All traffic classes considered; see dpni_set_rx_flow()
++ */
++#define DPNI_ALL_TCS (uint8_t)(-1)
++/**
++ * All flows within traffic class considered; see dpni_set_rx_flow()
++ */
++#define DPNI_ALL_TC_FLOWS (uint16_t)(-1)
++/**
++ * Generate new flow ID; see dpni_set_tx_flow()
++ */
++#define DPNI_NEW_FLOW_ID (uint16_t)(-1)
++/**
++ * Use for common tx-conf queue; see dpni_set_tx_conf_<x>()
++ */
++#define DPNI_COMMON_TX_CONF (uint16_t)(-1)
++
++/**
++ * dpni_open() - Open a control session for the specified object
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @dpni_id: DPNI unique ID
++ * @token: Returned token; use in subsequent API calls
++ *
++ * This function can be used to open a control session for an
++ * already created object; an object may have been declared in
++ * the DPL or by calling the dpni_create() function.
++ * This function returns a unique authentication token,
++ * associated with the specific object ID and the specific MC
++ * portal; this token must be used in all subsequent commands for
++ * this specific object.
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpni_open(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ int dpni_id,
++ uint16_t *token);
++
++/**
++ * dpni_close() - Close the control session of the object
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ *
++ * After this function is called, no further operations are
++ * allowed on the object without opening a new control session.
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpni_close(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token);
++
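++/*
++ * Usage sketch (illustrative only, not part of the API): open a control
++ * session, use the returned token, then close it. 'mc_io', 'dpni_id' and
++ * 'err' are assumed to come from the caller's context; a cmd_flags value of
++ * 0 means no special command flags.
++ *
++ *	uint16_t token;
++ *	int err;
++ *
++ *	err = dpni_open(mc_io, 0, dpni_id, &token);
++ *	if (err)
++ *		return err;
++ *	// ... issue DPNI commands using 'token' ...
++ *	err = dpni_close(mc_io, 0, token);
++ */
++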
++/* DPNI configuration options */
++
++/**
++ * Allow different distribution key profiles for different traffic classes;
++ * if not set, a single key profile is assumed
++ */
++#define DPNI_OPT_ALLOW_DIST_KEY_PER_TC 0x00000001
++
++/**
++ * Disable all non-error transmit confirmation; error frames are reported
++ * back to a common Tx error queue
++ */
++#define DPNI_OPT_TX_CONF_DISABLED 0x00000002
++
++/**
++ * Disable per-sender private Tx confirmation/error queue
++ */
++#define DPNI_OPT_PRIVATE_TX_CONF_ERROR_DISABLED 0x00000004
++
++/**
++ * Support distribution based on hashed key;
++ * allows statistical distribution over receive queues in a traffic class
++ */
++#define DPNI_OPT_DIST_HASH 0x00000010
++
++/**
++ * DEPRECATED - if this flag is selected and all new 'max_fs_entries' are
++ * '0', then backward compatibility is preserved;
++ * Support distribution based on flow steering;
++ * allows explicit control of distribution over receive queues in a traffic
++ * class
++ */
++#define DPNI_OPT_DIST_FS 0x00000020
++
++/**
++ * Unicast filtering support
++ */
++#define DPNI_OPT_UNICAST_FILTER 0x00000080
++/**
++ * Multicast filtering support
++ */
++#define DPNI_OPT_MULTICAST_FILTER 0x00000100
++/**
++ * VLAN filtering support
++ */
++#define DPNI_OPT_VLAN_FILTER 0x00000200
++/**
++ * Support IP reassembly on received packets
++ */
++#define DPNI_OPT_IPR 0x00000800
++/**
++ * Support IP fragmentation on transmitted packets
++ */
++#define DPNI_OPT_IPF 0x00001000
++/**
++ * VLAN manipulation support
++ */
++#define DPNI_OPT_VLAN_MANIPULATION 0x00010000
++/**
++ * Support masking of QoS lookup keys
++ */
++#define DPNI_OPT_QOS_MASK_SUPPORT 0x00020000
++/**
++ * Support masking of Flow Steering lookup keys
++ */
++#define DPNI_OPT_FS_MASK_SUPPORT 0x00040000
++
++/**
++ * struct dpni_extended_cfg - Structure representing extended DPNI configuration
++ * @tc_cfg: TCs configuration
++ * @ipr_cfg: IP reassembly configuration
++ */
++struct dpni_extended_cfg {
++ /**
++ * struct tc_cfg - TC configuration
++ * @max_dist: Maximum distribution size for Rx traffic class;
++ * supported values: 1,2,3,4,6,7,8,12,14,16,24,28,32,48,56,64,96,
++ * 112,128,192,224,256,384,448,512,768,896,1024;
++ *	value '0' will be treated as '1';
++ *	other unsupported values will be rounded down to the nearest
++ *	supported value.
++ * @max_fs_entries: Maximum FS entries for Rx traffic class;
++ * '0' means no support for this TC;
++ */
++ struct {
++ uint16_t max_dist;
++ uint16_t max_fs_entries;
++ } tc_cfg[DPNI_MAX_TC];
++ /**
++ * struct ipr_cfg - Structure representing IP reassembly configuration
++ * @max_reass_frm_size: Maximum size of the reassembled frame
++ * @min_frag_size_ipv4: Minimum fragment size of IPv4 fragments
++ * @min_frag_size_ipv6: Minimum fragment size of IPv6 fragments
++ * @max_open_frames_ipv4: Maximum concurrent IPv4 packets in reassembly
++ * process
++ * @max_open_frames_ipv6: Maximum concurrent IPv6 packets in reassembly
++ * process
++ */
++ struct {
++ uint16_t max_reass_frm_size;
++ uint16_t min_frag_size_ipv4;
++ uint16_t min_frag_size_ipv6;
++ uint16_t max_open_frames_ipv4;
++ uint16_t max_open_frames_ipv6;
++ } ipr_cfg;
++};
++
++/**
++ * dpni_prepare_extended_cfg() - prepare the extended parameters
++ * @cfg: extended structure
++ * @ext_cfg_buf: Zeroed 256 bytes of memory before mapping it to DMA
++ *
++ * This function has to be called before dpni_create()
++ */
++int dpni_prepare_extended_cfg(const struct dpni_extended_cfg *cfg,
++ uint8_t *ext_cfg_buf);
++
++/**
++ * struct dpni_cfg - Structure representing DPNI configuration
++ * @mac_addr: Primary MAC address
++ * @adv: Advanced parameters; default is all zeros;
++ * use this structure to change default settings
++ */
++struct dpni_cfg {
++ uint8_t mac_addr[6];
++ /**
++ * struct adv - Advanced parameters
++ * @options: Mask of available options; use 'DPNI_OPT_<X>' values
++ * @start_hdr: Selects the packet starting header for parsing;
++ * 'NET_PROT_NONE' is treated as default: 'NET_PROT_ETH'
++ * @max_senders: Maximum number of different senders; used as the number
++ * of dedicated Tx flows; Non-power-of-2 values are rounded
++ * up to the next power-of-2 value as hardware demands it;
++ * '0' will be treated as '1'
++ * @max_tcs: Maximum number of traffic classes (for both Tx and Rx);
++ *		'0' will be treated as '1'
++ * @max_unicast_filters: Maximum number of unicast filters;
++ * '0' is treated as '16'
++ * @max_multicast_filters: Maximum number of multicast filters;
++ *		'0' is treated as '64'
++ * @max_vlan_filters: Maximum number of VLAN filters
++ * @max_qos_entries: if 'max_tcs > 1', declares the maximum entries in
++ * the QoS table; '0' is treated as '64'
++ * @max_qos_key_size: Maximum key size for the QoS look-up;
++ * '0' is treated as '24' which is enough for IPv4
++ * 5-tuple
++ * @max_dist_key_size: Maximum key size for the distribution;
++ * '0' is treated as '24' which is enough for IPv4 5-tuple
++ * @max_policers: Maximum number of policers;
++ * should be between '0' and max_tcs
++ * @max_congestion_ctrl: Maximum number of congestion control groups
++ * (CGs); covers early drop and congestion notification
++ * requirements;
++ * should be between '0' and ('max_tcs' + 'max_senders')
++ * @ext_cfg_iova: I/O virtual address of 256 bytes DMA-able memory
++ * filled with the extended configuration by calling
++ * dpni_prepare_extended_cfg()
++ */
++ struct {
++ uint32_t options;
++ enum net_prot start_hdr;
++ uint8_t max_senders;
++ uint8_t max_tcs;
++ uint8_t max_unicast_filters;
++ uint8_t max_multicast_filters;
++ uint8_t max_vlan_filters;
++ uint8_t max_qos_entries;
++ uint8_t max_qos_key_size;
++ uint8_t max_dist_key_size;
++ uint8_t max_policers;
++ uint8_t max_congestion_ctrl;
++ uint64_t ext_cfg_iova;
++ } adv;
++};
++
++/**
++ * dpni_create() - Create the DPNI object
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @cfg: Configuration structure
++ * @token: Returned token; use in subsequent API calls
++ *
++ * Create the DPNI object, allocate required resources and
++ * perform required initialization.
++ *
++ * The object can be created either by declaring it in the
++ * DPL file, or by calling this function.
++ *
++ * This function returns a unique authentication token,
++ * associated with the specific object ID and the specific MC
++ * portal; this token must be used in all subsequent calls to
++ * this specific object. For objects that are created using the
++ * DPL file, call dpni_open() function to get an authentication
++ * token first.
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpni_create(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ const struct dpni_cfg *cfg,
++ uint16_t *token);
++
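++/*
++ * Creation sketch (illustrative): prepare the extended configuration in a
++ * zeroed 256-byte buffer, map it for DMA, then create the DPNI. 'ext_buf',
++ * 'ext_buf_iova' (the DMA address of 'ext_buf') and 'mc_io' are assumptions
++ * about the caller's environment; the values chosen are placeholders.
++ *
++ *	struct dpni_extended_cfg ext_cfg = {0};
++ *	struct dpni_cfg cfg = {0};
++ *	uint16_t token;
++ *
++ *	ext_cfg.tc_cfg[0].max_dist = 8;
++ *	dpni_prepare_extended_cfg(&ext_cfg, ext_buf);	// ext_buf: zeroed 256B
++ *	cfg.adv.options = DPNI_OPT_DIST_HASH | DPNI_OPT_UNICAST_FILTER;
++ *	cfg.adv.max_tcs = 1;
++ *	cfg.adv.ext_cfg_iova = ext_buf_iova;
++ *	if (dpni_create(mc_io, 0, &cfg, &token))
++ *		return -1;
++ */
++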
++/**
++ * dpni_destroy() - Destroy the DPNI object and release all its resources.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ *
++ * Return: '0' on Success; error code otherwise.
++ */
++int dpni_destroy(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token);
++
++/**
++ * struct dpni_pools_cfg - Structure representing buffer pools configuration
++ * @num_dpbp: Number of DPBPs
++ * @pools: Array of buffer pools parameters; The number of valid entries
++ * must match 'num_dpbp' value
++ */
++struct dpni_pools_cfg {
++ uint8_t num_dpbp;
++ /**
++ * struct pools - Buffer pools parameters
++ * @dpbp_id: DPBP object ID
++ * @buffer_size: Buffer size
++ * @backup_pool: Backup pool
++ */
++ struct {
++ int dpbp_id;
++ uint16_t buffer_size;
++ int backup_pool;
++ } pools[DPNI_MAX_DPBP];
++};
++
++/**
++ * dpni_set_pools() - Set buffer pools configuration
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ * @cfg: Buffer pools configuration
++ *
++ * Mandatory for DPNI operation.
++ * warning: Allowed only when DPNI is disabled
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpni_set_pools(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ const struct dpni_pools_cfg *cfg);
++
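++/*
++ * Pools sketch (illustrative): attach a single DPBP before enabling the
++ * DPNI. 'dpbp_id', the 2048-byte buffer size, 'mc_io', 'token' and 'err' are
++ * placeholders assumed from the caller's context.
++ *
++ *	struct dpni_pools_cfg pools = {0};
++ *
++ *	pools.num_dpbp = 1;
++ *	pools.pools[0].dpbp_id = dpbp_id;
++ *	pools.pools[0].buffer_size = 2048;
++ *	pools.pools[0].backup_pool = 0;
++ *	err = dpni_set_pools(mc_io, 0, token, &pools);
++ */
++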
++/**
++ * dpni_enable() - Enable the DPNI, allow sending and receiving frames.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpni_enable(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token);
++
++/**
++ * dpni_disable() - Disable the DPNI, stop sending and receiving frames.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpni_disable(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token);
++
++/**
++ * dpni_is_enabled() - Check if the DPNI is enabled.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ * @en: Returns '1' if object is enabled; '0' otherwise
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpni_is_enabled(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ int *en);
++
++/**
++ * dpni_reset() - Reset the DPNI, returns the object to initial state.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpni_reset(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token);
++
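++/*
++ * Start/stop sketch (illustrative): enable the interface, verify the state,
++ * then disable and reset it; error handling is abbreviated and 'mc_io',
++ * 'token' and 'err' are assumed from the caller's context.
++ *
++ *	int en = 0;
++ *
++ *	err = dpni_enable(mc_io, 0, token);
++ *	err = dpni_is_enabled(mc_io, 0, token, &en);	// en == 1 when enabled
++ *	err = dpni_disable(mc_io, 0, token);
++ *	err = dpni_reset(mc_io, 0, token);	// back to initial state
++ */
++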
++/**
++ * DPNI IRQ Index and Events
++ */
++
++/**
++ * IRQ index
++ */
++#define DPNI_IRQ_INDEX 0
++/**
++ * IRQ event - indicates a change in link state
++ */
++#define DPNI_IRQ_EVENT_LINK_CHANGED 0x00000001
++
++/**
++ * struct dpni_irq_cfg - IRQ configuration
++ * @addr: Address that must be written to signal a message-based interrupt
++ * @val: Value to write into irq_addr address
++ * @irq_num: A user defined number associated with this IRQ
++ */
++struct dpni_irq_cfg {
++ uint64_t addr;
++ uint32_t val;
++ int irq_num;
++};
++
++/**
++ * dpni_set_irq() - Set IRQ information for the DPNI to trigger an interrupt.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ * @irq_index: Identifies the interrupt index to configure
++ * @irq_cfg: IRQ configuration
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpni_set_irq(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ struct dpni_irq_cfg *irq_cfg);
++
++/**
++ * dpni_get_irq() - Get IRQ information from the DPNI.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ * @irq_index: The interrupt index to configure
++ * @type: Interrupt type: 0 represents message interrupt
++ * type (both irq_addr and irq_val are valid)
++ * @irq_cfg: IRQ attributes
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpni_get_irq(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ int *type,
++ struct dpni_irq_cfg *irq_cfg);
++
++/**
++ * dpni_set_irq_enable() - Set overall interrupt state.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ * @irq_index: The interrupt index to configure
++ * @en: Interrupt state: - enable = 1, disable = 0
++ *
++ * Allows GPP software to control when interrupts are generated.
++ * Each interrupt can have up to 32 causes. The enable/disable controls the
++ * overall interrupt state. If the interrupt is disabled, no cause will
++ * trigger an interrupt.
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpni_set_irq_enable(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ uint8_t en);
++
++/**
++ * dpni_get_irq_enable() - Get overall interrupt state
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ * @irq_index: The interrupt index to configure
++ * @en: Returned interrupt state - enable = 1, disable = 0
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpni_get_irq_enable(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ uint8_t *en);
++
++/**
++ * dpni_set_irq_mask() - Set interrupt mask.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ * @irq_index: The interrupt index to configure
++ * @mask: event mask to trigger interrupt;
++ * each bit:
++ * 0 = ignore event
++ * 1 = consider event for asserting IRQ
++ *
++ * Every interrupt can have up to 32 causes and the interrupt model supports
++ * masking/unmasking each cause independently
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpni_set_irq_mask(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ uint32_t mask);
++
++/**
++ * dpni_get_irq_mask() - Get interrupt mask.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ * @irq_index: The interrupt index to configure
++ * @mask: Returned event mask to trigger interrupt
++ *
++ * Every interrupt can have up to 32 causes and the interrupt model supports
++ * masking/unmasking each cause independently
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpni_get_irq_mask(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ uint32_t *mask);
++
++/**
++ * dpni_get_irq_status() - Get the current status of any pending interrupts.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ * @irq_index: The interrupt index to configure
++ * @status: Returned interrupts status - one bit per cause:
++ * 0 = no interrupt pending
++ * 1 = interrupt pending
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpni_get_irq_status(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ uint32_t *status);
++
++/**
++ * dpni_clear_irq_status() - Clear a pending interrupt's status
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ * @irq_index: The interrupt index to configure
++ * @status: bits to clear (W1C) - one bit per cause:
++ * 0 = don't change
++ * 1 = clear status bit
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpni_clear_irq_status(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ uint32_t status);
++
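++/*
++ * IRQ sketch (illustrative): route the link-change event to a message-based
++ * interrupt. 'msi_addr' and 'msi_val' are placeholders for the MSI address
++ * and data supplied by the interrupt infrastructure; 'mc_io', 'token' and
++ * 'err' are assumed from the caller's context.
++ *
++ *	struct dpni_irq_cfg irq_cfg = {
++ *		.addr = msi_addr,
++ *		.val = msi_val,
++ *		.irq_num = 0,
++ *	};
++ *	uint32_t status = 0;
++ *
++ *	err = dpni_set_irq(mc_io, 0, token, DPNI_IRQ_INDEX, &irq_cfg);
++ *	err = dpni_set_irq_mask(mc_io, 0, token, DPNI_IRQ_INDEX,
++ *				DPNI_IRQ_EVENT_LINK_CHANGED);
++ *	err = dpni_set_irq_enable(mc_io, 0, token, DPNI_IRQ_INDEX, 1);
++ *	// later, in the interrupt handler:
++ *	err = dpni_get_irq_status(mc_io, 0, token, DPNI_IRQ_INDEX, &status);
++ *	err = dpni_clear_irq_status(mc_io, 0, token, DPNI_IRQ_INDEX, status);
++ */
++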
++/**
++ * struct dpni_attr - Structure representing DPNI attributes
++ * @id: DPNI object ID
++ * @version: DPNI version
++ * @start_hdr: Indicates the packet starting header for parsing
++ * @options: Mask of available options; reflects the value as was given in
++ * object's creation
++ * @max_senders: Maximum number of different senders; used as the number
++ * of dedicated Tx flows;
++ * @max_tcs: Maximum number of traffic classes (for both Tx and Rx)
++ * @max_unicast_filters: Maximum number of unicast filters
++ * @max_multicast_filters: Maximum number of multicast filters
++ * @max_vlan_filters: Maximum number of VLAN filters
++ * @max_qos_entries: if 'max_tcs > 1', declares the maximum entries in QoS table
++ * @max_qos_key_size: Maximum key size for the QoS look-up
++ * @max_dist_key_size: Maximum key size for the distribution look-up
++ * @max_policers: Maximum number of policers;
++ * @max_congestion_ctrl: Maximum number of congestion control groups (CGs);
++ * @ext_cfg_iova: I/O virtual address of 256 bytes DMA-able memory;
++ * call dpni_extract_extended_cfg() to extract the extended configuration
++ */
++struct dpni_attr {
++ int id;
++ /**
++ * struct version - DPNI version
++ * @major: DPNI major version
++ * @minor: DPNI minor version
++ */
++ struct {
++ uint16_t major;
++ uint16_t minor;
++ } version;
++ enum net_prot start_hdr;
++ uint32_t options;
++ uint8_t max_senders;
++ uint8_t max_tcs;
++ uint8_t max_unicast_filters;
++ uint8_t max_multicast_filters;
++ uint8_t max_vlan_filters;
++ uint8_t max_qos_entries;
++ uint8_t max_qos_key_size;
++ uint8_t max_dist_key_size;
++ uint8_t max_policers;
++ uint8_t max_congestion_ctrl;
++ uint64_t ext_cfg_iova;
++};
++
++/**
++ * dpni_get_attributes() - Retrieve DPNI attributes.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ * @attr: Object's attributes
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpni_get_attributes(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ struct dpni_attr *attr);
++
++/**
++ * dpni_extract_extended_cfg() - extract the extended parameters
++ * @cfg: extended structure
++ * @ext_cfg_buf: 256 bytes of DMA-able memory
++ *
++ * This function has to be called after dpni_get_attributes()
++ */
++int dpni_extract_extended_cfg(struct dpni_extended_cfg *cfg,
++ const uint8_t *ext_cfg_buf);
++
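++/*
++ * Query sketch (illustrative): read back the attributes and the extended
++ * configuration. 'ext_buf' and 'ext_buf_iova' are an assumed zeroed 256-byte
++ * DMA-able buffer and its DMA address; 'mc_io', 'token' and 'err' come from
++ * the caller's context.
++ *
++ *	struct dpni_attr attr = {0};
++ *	struct dpni_extended_cfg ext_cfg;
++ *
++ *	attr.ext_cfg_iova = ext_buf_iova;
++ *	err = dpni_get_attributes(mc_io, 0, token, &attr);
++ *	err = dpni_extract_extended_cfg(&ext_cfg, ext_buf);
++ */
++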
++/**
++ * DPNI errors
++ */
++
++/**
++ * Extract out of frame header error
++ */
++#define DPNI_ERROR_EOFHE 0x00020000
++/**
++ * Frame length error
++ */
++#define DPNI_ERROR_FLE 0x00002000
++/**
++ * Frame physical error
++ */
++#define DPNI_ERROR_FPE 0x00001000
++/**
++ * Parsing header error
++ */
++#define DPNI_ERROR_PHE 0x00000020
++/**
++ * Parser L3 checksum error
++ */
++#define DPNI_ERROR_L3CE 0x00000004
++/**
++ * Parser L4 checksum error
++ */
++#define DPNI_ERROR_L4CE 0x00000001
++
++/**
++ * enum dpni_error_action - Defines DPNI behavior for errors
++ * @DPNI_ERROR_ACTION_DISCARD: Discard the frame
++ * @DPNI_ERROR_ACTION_CONTINUE: Continue with the normal flow
++ * @DPNI_ERROR_ACTION_SEND_TO_ERROR_QUEUE: Send the frame to the error queue
++ */
++enum dpni_error_action {
++ DPNI_ERROR_ACTION_DISCARD = 0,
++ DPNI_ERROR_ACTION_CONTINUE = 1,
++ DPNI_ERROR_ACTION_SEND_TO_ERROR_QUEUE = 2
++};
++
++/**
++ * struct dpni_error_cfg - Structure representing DPNI errors treatment
++ * @errors: Errors mask; use 'DPNI_ERROR_<X>' values
++ * @error_action: The desired action for the errors mask
++ * @set_frame_annotation: Set to '1' to mark the errors in frame annotation
++ * status (FAS); relevant only for the non-discard action
++ */
++struct dpni_error_cfg {
++ uint32_t errors;
++ enum dpni_error_action error_action;
++ int set_frame_annotation;
++};
++
++/**
++ * dpni_set_errors_behavior() - Set errors behavior
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ * @cfg: Errors configuration
++ *
++ * This function may be called numerous times with different
++ * error masks.
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpni_set_errors_behavior(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ struct dpni_error_cfg *cfg);
++
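++/*
++ * Error-handling sketch (illustrative): let frames with L3/L4 checksum
++ * errors continue on the normal flow but mark them in the frame annotation
++ * status; 'mc_io', 'token' and 'err' are assumed from the caller's context.
++ *
++ *	struct dpni_error_cfg err_cfg = {
++ *		.errors = DPNI_ERROR_L3CE | DPNI_ERROR_L4CE,
++ *		.error_action = DPNI_ERROR_ACTION_CONTINUE,
++ *		.set_frame_annotation = 1,
++ *	};
++ *
++ *	err = dpni_set_errors_behavior(mc_io, 0, token, &err_cfg);
++ */
++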
++/**
++ * DPNI buffer layout modification options
++ */
++
++/**
++ * Select to modify the time-stamp setting
++ */
++#define DPNI_BUF_LAYOUT_OPT_TIMESTAMP 0x00000001
++/**
++ * Select to modify the parser-result setting; not applicable for Tx
++ */
++#define DPNI_BUF_LAYOUT_OPT_PARSER_RESULT 0x00000002
++/**
++ * Select to modify the frame-status setting
++ */
++#define DPNI_BUF_LAYOUT_OPT_FRAME_STATUS 0x00000004
++/**
++ * Select to modify the private-data-size setting
++ */
++#define DPNI_BUF_LAYOUT_OPT_PRIVATE_DATA_SIZE 0x00000008
++/**
++ * Select to modify the data-alignment setting
++ */
++#define DPNI_BUF_LAYOUT_OPT_DATA_ALIGN 0x00000010
++/**
++ * Select to modify the data-head-room setting
++ */
++#define DPNI_BUF_LAYOUT_OPT_DATA_HEAD_ROOM 0x00000020
++/**
++ * Select to modify the data-tail-room setting
++ */
++#define DPNI_BUF_LAYOUT_OPT_DATA_TAIL_ROOM 0x00000040
++
++/**
++ * struct dpni_buffer_layout - Structure representing DPNI buffer layout
++ * @options: Flags representing the suggested modifications to the buffer
++ * layout; Use any combination of 'DPNI_BUF_LAYOUT_OPT_<X>' flags
++ * @pass_timestamp: Pass timestamp value
++ * @pass_parser_result: Pass parser results
++ * @pass_frame_status: Pass frame status
++ * @private_data_size: Size kept for private data (in bytes)
++ * @data_align: Data alignment
++ * @data_head_room: Data head room
++ * @data_tail_room: Data tail room
++ */
++struct dpni_buffer_layout {
++ uint32_t options;
++ int pass_timestamp;
++ int pass_parser_result;
++ int pass_frame_status;
++ uint16_t private_data_size;
++ uint16_t data_align;
++ uint16_t data_head_room;
++ uint16_t data_tail_room;
++};
++
++/**
++ * dpni_get_rx_buffer_layout() - Retrieve Rx buffer layout attributes.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ * @layout: Returns buffer layout attributes
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpni_get_rx_buffer_layout(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ struct dpni_buffer_layout *layout);
++
++/**
++ * dpni_set_rx_buffer_layout() - Set Rx buffer layout configuration.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ * @layout: Buffer layout configuration
++ *
++ * Return: '0' on Success; Error code otherwise.
++ *
++ * @warning Allowed only when DPNI is disabled
++ */
++int dpni_set_rx_buffer_layout(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ const struct dpni_buffer_layout *layout);
++
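++/*
++ * Layout sketch (illustrative): request frame status and 64-byte data
++ * alignment on Rx buffers; only the fields flagged in 'options' are applied,
++ * and the call is allowed only while the DPNI is disabled. 'mc_io', 'token'
++ * and 'err' are assumed from the caller's context.
++ *
++ *	struct dpni_buffer_layout layout = {0};
++ *
++ *	layout.options = DPNI_BUF_LAYOUT_OPT_FRAME_STATUS |
++ *			 DPNI_BUF_LAYOUT_OPT_DATA_ALIGN;
++ *	layout.pass_frame_status = 1;
++ *	layout.data_align = 64;
++ *	err = dpni_set_rx_buffer_layout(mc_io, 0, token, &layout);
++ */
++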
++/**
++ * dpni_get_tx_buffer_layout() - Retrieve Tx buffer layout attributes.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ * @layout: Returns buffer layout attributes
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpni_get_tx_buffer_layout(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ struct dpni_buffer_layout *layout);
++
++/**
++ * dpni_set_tx_buffer_layout() - Set Tx buffer layout configuration.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ * @layout: Buffer layout configuration
++ *
++ * Return: '0' on Success; Error code otherwise.
++ *
++ * @warning Allowed only when DPNI is disabled
++ */
++int dpni_set_tx_buffer_layout(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ const struct dpni_buffer_layout *layout);
++
++/**
++ * dpni_get_tx_conf_buffer_layout() - Retrieve Tx confirmation buffer layout
++ * attributes.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ * @layout: Returns buffer layout attributes
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpni_get_tx_conf_buffer_layout(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ struct dpni_buffer_layout *layout);
++
++/**
++ * dpni_set_tx_conf_buffer_layout() - Set Tx confirmation buffer layout
++ * configuration.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ * @layout: Buffer layout configuration
++ *
++ * Return: '0' on Success; Error code otherwise.
++ *
++ * @warning Allowed only when DPNI is disabled
++ */
++int dpni_set_tx_conf_buffer_layout(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ const struct dpni_buffer_layout *layout);
++
++/**
++ * dpni_set_l3_chksum_validation() - Enable/disable L3 checksum validation
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ * @en: Set to '1' to enable; '0' to disable
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpni_set_l3_chksum_validation(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ int en);
++
++/**
++ * dpni_get_l3_chksum_validation() - Get L3 checksum validation mode
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ * @en: Returns '1' if enabled; '0' otherwise
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpni_get_l3_chksum_validation(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ int *en);
++
++/**
++ * dpni_set_l4_chksum_validation() - Enable/disable L4 checksum validation
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ * @en: Set to '1' to enable; '0' to disable
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpni_set_l4_chksum_validation(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ int en);
++
++/**
++ * dpni_get_l4_chksum_validation() - Get L4 checksum validation mode
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ * @en: Returns '1' if enabled; '0' otherwise
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpni_get_l4_chksum_validation(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ int *en);
++
++/**
++ * dpni_get_qdid() - Get the Queuing Destination ID (QDID) that should be used
++ * for enqueue operations
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ * @qdid: Returned virtual QDID value that should be used as an argument
++ * in all enqueue operations
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpni_get_qdid(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint16_t *qdid);
++
++/**
++ * struct dpni_sp_info - Structure representing DPNI storage-profile information
++ * (relevant only for DPNI owned by AIOP)
++ * @spids: array of storage-profiles
++ */
++struct dpni_sp_info {
++ uint16_t spids[DPNI_MAX_SP];
++};
++
++/**
++ * dpni_get_sp_info() - Get the AIOP storage profile IDs associated with the DPNI
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ * @sp_info: Returned AIOP storage-profile information
++ *
++ * Return: '0' on Success; Error code otherwise.
++ *
++ * @warning Only relevant for DPNI that belongs to AIOP container.
++ */
++int dpni_get_sp_info(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ struct dpni_sp_info *sp_info);
++
++/**
++ * dpni_get_tx_data_offset() - Get the Tx data offset (from start of buffer)
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ * @data_offset: Tx data offset (from start of buffer)
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpni_get_tx_data_offset(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint16_t *data_offset);
++
++/**
++ * enum dpni_counter - DPNI counter types
++ * @DPNI_CNT_ING_FRAME: Counts ingress frames
++ * @DPNI_CNT_ING_BYTE: Counts ingress bytes
++ * @DPNI_CNT_ING_FRAME_DROP: Counts ingress frames dropped due to explicit
++ * 'drop' setting
++ * @DPNI_CNT_ING_FRAME_DISCARD: Counts ingress frames discarded due to errors
++ * @DPNI_CNT_ING_MCAST_FRAME: Counts ingress multicast frames
++ * @DPNI_CNT_ING_MCAST_BYTE: Counts ingress multicast bytes
++ * @DPNI_CNT_ING_BCAST_FRAME: Counts ingress broadcast frames
++ * @DPNI_CNT_ING_BCAST_BYTES: Counts ingress broadcast bytes
++ * @DPNI_CNT_EGR_FRAME: Counts egress frames
++ * @DPNI_CNT_EGR_BYTE: Counts egress bytes
++ * @DPNI_CNT_EGR_FRAME_DISCARD: Counts egress frames discarded due to errors
++ */
++enum dpni_counter {
++ DPNI_CNT_ING_FRAME = 0x0,
++ DPNI_CNT_ING_BYTE = 0x1,
++ DPNI_CNT_ING_FRAME_DROP = 0x2,
++ DPNI_CNT_ING_FRAME_DISCARD = 0x3,
++ DPNI_CNT_ING_MCAST_FRAME = 0x4,
++ DPNI_CNT_ING_MCAST_BYTE = 0x5,
++ DPNI_CNT_ING_BCAST_FRAME = 0x6,
++ DPNI_CNT_ING_BCAST_BYTES = 0x7,
++ DPNI_CNT_EGR_FRAME = 0x8,
++ DPNI_CNT_EGR_BYTE = 0x9,
++ DPNI_CNT_EGR_FRAME_DISCARD = 0xa
++};
++
++/**
++ * dpni_get_counter() - Read a specific DPNI counter
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ * @counter: The requested counter
++ * @value: Returned counter's current value
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpni_get_counter(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ enum dpni_counter counter,
++ uint64_t *value);
++
++/**
++ * dpni_set_counter() - Set (or clear) a specific DPNI counter
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ * @counter: The requested counter
++ * @value: New counter value; typically pass '0' for resetting
++ * the counter.
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpni_set_counter(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ enum dpni_counter counter,
++ uint64_t value);
++
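++/*
++ * Counter sketch (illustrative): read the ingress frame counter, then reset
++ * it; 'mc_io', 'token' and 'err' are assumed from the caller's context.
++ *
++ *	uint64_t frames = 0;
++ *
++ *	err = dpni_get_counter(mc_io, 0, token, DPNI_CNT_ING_FRAME, &frames);
++ *	err = dpni_set_counter(mc_io, 0, token, DPNI_CNT_ING_FRAME, 0);
++ */
++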
++/**
++ * Enable auto-negotiation
++ */
++#define DPNI_LINK_OPT_AUTONEG 0x0000000000000001ULL
++/**
++ * Enable half-duplex mode
++ */
++#define DPNI_LINK_OPT_HALF_DUPLEX 0x0000000000000002ULL
++/**
++ * Enable pause frames
++ */
++#define DPNI_LINK_OPT_PAUSE 0x0000000000000004ULL
++/**
++ * Enable asymmetric pause frames
++ */
++#define DPNI_LINK_OPT_ASYM_PAUSE 0x0000000000000008ULL
++
++/**
++ * struct dpni_link_cfg - Structure representing DPNI link configuration
++ * @rate: Rate
++ * @options: Mask of available options; use 'DPNI_LINK_OPT_<X>' values
++ */
++struct dpni_link_cfg {
++ uint32_t rate;
++ uint64_t options;
++};
++
++/**
++ * dpni_set_link_cfg() - set the link configuration.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ * @cfg: Link configuration
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpni_set_link_cfg(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ const struct dpni_link_cfg *cfg);
++
++/**
++ * struct dpni_link_state - Structure representing DPNI link state
++ * @rate: Rate
++ * @options: Mask of available options; use 'DPNI_LINK_OPT_<X>' values
++ * @up: Link state; '0' for down, '1' for up
++ */
++struct dpni_link_state {
++ uint32_t rate;
++ uint64_t options;
++ int up;
++};
++
++/**
++ * dpni_get_link_state() - Return the link state (either up or down)
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ * @state: Returned link state;
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpni_get_link_state(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ struct dpni_link_state *state);
++
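++/*
++ * Link sketch (illustrative): request auto-negotiation with pause frames and
++ * poll the resulting state; the 1000 Mbps rate is a placeholder, and
++ * 'mc_io', 'token' and 'err' are assumed from the caller's context.
++ *
++ *	struct dpni_link_cfg link_cfg = {
++ *		.rate = 1000,
++ *		.options = DPNI_LINK_OPT_AUTONEG | DPNI_LINK_OPT_PAUSE,
++ *	};
++ *	struct dpni_link_state state = {0};
++ *
++ *	err = dpni_set_link_cfg(mc_io, 0, token, &link_cfg);
++ *	err = dpni_get_link_state(mc_io, 0, token, &state);
++ *	// state.up == 1 once the link is up
++ */
++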
++/**
++ * struct dpni_tx_shaping - Structure representing DPNI tx shaping configuration
++ * @rate_limit: rate in Mbps
++ * @max_burst_size: burst size in bytes (up to 64KB)
++ */
++struct dpni_tx_shaping_cfg {
++ uint32_t rate_limit;
++ uint16_t max_burst_size;
++};
++
++/**
++ * dpni_set_tx_shaping() - Set the transmit shaping
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ * @tx_shaper: tx shaping configuration
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpni_set_tx_shaping(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ const struct dpni_tx_shaping_cfg *tx_shaper);
++
++/**
++ * dpni_set_max_frame_length() - Set the maximum received frame length.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ * @max_frame_length: Maximum received frame length (in
++ * bytes); frame is discarded if its
++ * length exceeds this value
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpni_set_max_frame_length(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint16_t max_frame_length);
++
++/**
++ * dpni_get_max_frame_length() - Get the maximum received frame length.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ * @max_frame_length: Maximum received frame length (in
++ * bytes); frame is discarded if its
++ * length exceeds this value
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpni_get_max_frame_length(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint16_t *max_frame_length);
++
++/**
++ * dpni_set_mtu() - Set the MTU for the interface.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ * @mtu: MTU length (in bytes)
++ *
++ * MTU determines the maximum fragment size for performing IP
++ * fragmentation on egress packets.
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpni_set_mtu(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint16_t mtu);
++
++/**
++ * dpni_get_mtu() - Get the MTU.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ * @mtu: Returned MTU length (in bytes)
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpni_get_mtu(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint16_t *mtu);
++
++/**
++ * dpni_set_multicast_promisc() - Enable/disable multicast promiscuous mode
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ * @en: Set to '1' to enable; '0' to disable
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpni_set_multicast_promisc(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ int en);
++
++/**
++ * dpni_get_multicast_promisc() - Get multicast promiscuous mode
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ * @en: Returns '1' if enabled; '0' otherwise
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpni_get_multicast_promisc(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ int *en);
++
++/**
++ * dpni_set_unicast_promisc() - Enable/disable unicast promiscuous mode
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ * @en: Set to '1' to enable; '0' to disable
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpni_set_unicast_promisc(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ int en);
++
++/**
++ * dpni_get_unicast_promisc() - Get unicast promiscuous mode
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ * @en: Returns '1' if enabled; '0' otherwise
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpni_get_unicast_promisc(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ int *en);
++
++/**
++ * dpni_set_primary_mac_addr() - Set the primary MAC address
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ * @mac_addr: MAC address to set as primary address
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpni_set_primary_mac_addr(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ const uint8_t mac_addr[6]);
++
++/**
++ * dpni_get_primary_mac_addr() - Get the primary MAC address
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ * @mac_addr: Returned MAC address
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpni_get_primary_mac_addr(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t mac_addr[6]);
++
++/**
++ * dpni_add_mac_addr() - Add MAC address filter
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ * @mac_addr: MAC address to add
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpni_add_mac_addr(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ const uint8_t mac_addr[6]);
++
++/**
++ * dpni_remove_mac_addr() - Remove MAC address filter
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ * @mac_addr: MAC address to remove
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpni_remove_mac_addr(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ const uint8_t mac_addr[6]);
++
++/**
++ * dpni_clear_mac_filters() - Clear all unicast and/or multicast MAC filters
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ * @unicast: Set to '1' to clear unicast addresses
++ * @multicast: Set to '1' to clear multicast addresses
++ *
++ * The primary MAC address is not cleared by this operation.
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpni_clear_mac_filters(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ int unicast,
++ int multicast);
++
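++/*
++ * MAC filter sketch (illustrative): set the primary address and add one
++ * extra unicast filter; the addresses are placeholders, and 'mc_io', 'token'
++ * and 'err' are assumed from the caller's context.
++ *
++ *	const uint8_t primary[6] = { 0x00, 0x04, 0x9f, 0x00, 0x00, 0x01 };
++ *	const uint8_t extra[6]   = { 0x00, 0x04, 0x9f, 0x00, 0x00, 0x02 };
++ *
++ *	err = dpni_set_primary_mac_addr(mc_io, 0, token, primary);
++ *	err = dpni_add_mac_addr(mc_io, 0, token, extra);
++ */
++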
++/**
++ * dpni_set_vlan_filters() - Enable/disable VLAN filtering mode
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ * @en: Set to '1' to enable; '0' to disable
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpni_set_vlan_filters(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ int en);
++
++/**
++ * dpni_add_vlan_id() - Add VLAN ID filter
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ * @vlan_id: VLAN ID to add
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpni_add_vlan_id(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint16_t vlan_id);
++
++/**
++ * dpni_remove_vlan_id() - Remove VLAN ID filter
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ * @vlan_id: VLAN ID to remove
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpni_remove_vlan_id(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint16_t vlan_id);
++
++/**
++ * dpni_clear_vlan_filters() - Clear all VLAN filters
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpni_clear_vlan_filters(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token);
++
++/**
++ * enum dpni_tx_schedule_mode - DPNI Tx scheduling mode
++ * @DPNI_TX_SCHED_STRICT_PRIORITY: strict priority
++ * @DPNI_TX_SCHED_WEIGHTED: weighted based scheduling
++ */
++enum dpni_tx_schedule_mode {
++ DPNI_TX_SCHED_STRICT_PRIORITY,
++ DPNI_TX_SCHED_WEIGHTED,
++};
++
++/**
++ * struct dpni_tx_schedule_cfg - Structure representing Tx
++ * scheduling configuration
++ * @mode: scheduling mode
++ * @delta_bandwidth: Bandwidth represented in weights from 100 to 10000;
++ * not applicable for 'strict-priority' mode;
++ */
++struct dpni_tx_schedule_cfg {
++ enum dpni_tx_schedule_mode mode;
++ uint16_t delta_bandwidth;
++};
++
++/**
++ * struct dpni_tx_selection_cfg - Structure representing transmission
++ * selection configuration
++ * @tc_sched: an array of traffic-classes
++ */
++struct dpni_tx_selection_cfg {
++ struct dpni_tx_schedule_cfg tc_sched[DPNI_MAX_TC];
++};
++
++/**
++ * dpni_set_tx_selection() - Set transmission selection configuration
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ * @cfg: transmission selection configuration
++ *
++ * warning: Allowed only when DPNI is disabled
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpni_set_tx_selection(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ const struct dpni_tx_selection_cfg *cfg);
++
++/**
++ * enum dpni_dist_mode - DPNI distribution mode
++ * @DPNI_DIST_MODE_NONE: No distribution
++ * @DPNI_DIST_MODE_HASH: Use hash distribution; only relevant if
++ * the 'DPNI_OPT_DIST_HASH' option was set at DPNI creation
++ * @DPNI_DIST_MODE_FS: Use explicit flow steering; only relevant if
++ * the 'DPNI_OPT_DIST_FS' option was set at DPNI creation
++ */
++enum dpni_dist_mode {
++ DPNI_DIST_MODE_NONE = 0,
++ DPNI_DIST_MODE_HASH = 1,
++ DPNI_DIST_MODE_FS = 2
++};
++
++/**
++ * enum dpni_fs_miss_action - DPNI Flow Steering miss action
++ * @DPNI_FS_MISS_DROP: In case of no-match, drop the frame
++ * @DPNI_FS_MISS_EXPLICIT_FLOWID: In case of no-match, use explicit flow-id
++ * @DPNI_FS_MISS_HASH: In case of no-match, distribute using hash
++ */
++enum dpni_fs_miss_action {
++ DPNI_FS_MISS_DROP = 0,
++ DPNI_FS_MISS_EXPLICIT_FLOWID = 1,
++ DPNI_FS_MISS_HASH = 2
++};
++
++/**
++ * struct dpni_fs_tbl_cfg - Flow Steering table configuration
++ * @miss_action: Miss action selection
++ * @default_flow_id: Used when 'miss_action = DPNI_FS_MISS_EXPLICIT_FLOWID'
++ */
++struct dpni_fs_tbl_cfg {
++ enum dpni_fs_miss_action miss_action;
++ uint16_t default_flow_id;
++};
++
++/**
++ * dpni_prepare_key_cfg() - prepare the extract parameters
++ * @cfg: defining a full Key Generation profile (rule)
++ * @key_cfg_buf: Zeroed 256 bytes of memory before mapping it to DMA
++ *
++ * This function has to be called before the following functions:
++ * - dpni_set_rx_tc_dist()
++ * - dpni_set_qos_table()
++ */
++int dpni_prepare_key_cfg(const struct dpkg_profile_cfg *cfg,
++ uint8_t *key_cfg_buf);
++
++/**
++ * struct dpni_rx_tc_dist_cfg - Rx traffic class distribution configuration
++ * @dist_size: Set the distribution size;
++ * supported values: 1,2,3,4,6,7,8,12,14,16,24,28,32,48,56,64,96,
++ * 112,128,192,224,256,384,448,512,768,896,1024
++ * @dist_mode: Distribution mode
++ * @key_cfg_iova: I/O virtual address of 256 bytes DMA-able memory filled with
++ * the extractions to be used for the distribution key by calling
++ *	dpni_prepare_key_cfg(); relevant only when
++ * 'dist_mode != DPNI_DIST_MODE_NONE', otherwise it can be '0'
++ * @fs_cfg: Flow Steering table configuration; only relevant if
++ * 'dist_mode = DPNI_DIST_MODE_FS'
++ */
++struct dpni_rx_tc_dist_cfg {
++ uint16_t dist_size;
++ enum dpni_dist_mode dist_mode;
++ uint64_t key_cfg_iova;
++ struct dpni_fs_tbl_cfg fs_cfg;
++};
++
++/**
++ * dpni_set_rx_tc_dist() - Set Rx traffic class distribution configuration
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ * @tc_id: Traffic class selection (0-7)
++ * @cfg: Traffic class distribution configuration
++ *
++ * warning: if 'dist_mode != DPNI_DIST_MODE_NONE', call dpni_prepare_key_cfg()
++ * first to prepare the key_cfg_iova parameter
++ *
++ * Return: '0' on Success; error code otherwise.
++ */
++int dpni_set_rx_tc_dist(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t tc_id,
++ const struct dpni_rx_tc_dist_cfg *cfg);
++
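++/*
++ * Distribution sketch (illustrative): hash-distribute traffic class 0 over
++ * 8 queues. The dpkg_profile_cfg contents come from fsl_dpkg.h and are not
++ * shown here; 'key_buf' and 'key_buf_iova' are an assumed zeroed 256-byte
++ * DMA-able buffer and its DMA address, while 'mc_io', 'token' and 'err' come
++ * from the caller's context.
++ *
++ *	struct dpkg_profile_cfg kg_cfg = {0};
++ *	struct dpni_rx_tc_dist_cfg dist_cfg = {0};
++ *
++ *	// ... fill 'kg_cfg' with the extractions for the hash key ...
++ *	err = dpni_prepare_key_cfg(&kg_cfg, key_buf);
++ *	dist_cfg.dist_size = 8;
++ *	dist_cfg.dist_mode = DPNI_DIST_MODE_HASH;
++ *	dist_cfg.key_cfg_iova = key_buf_iova;
++ *	err = dpni_set_rx_tc_dist(mc_io, 0, token, 0, &dist_cfg);
++ */
++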
++/**
++ * Set to select color aware mode (otherwise - color blind)
++ */
++#define DPNI_POLICER_OPT_COLOR_AWARE 0x00000001
++/**
++ * Set to discard frame with RED color
++ */
++#define DPNI_POLICER_OPT_DISCARD_RED 0x00000002
++
++/**
++ * enum dpni_policer_mode - selecting the policer mode
++ * @DPNI_POLICER_MODE_NONE: Policer is disabled
++ * @DPNI_POLICER_MODE_PASS_THROUGH: Policer pass through
++ * @DPNI_POLICER_MODE_RFC_2698: Policer algorithm RFC 2698
++ * @DPNI_POLICER_MODE_RFC_4115: Policer algorithm RFC 4115
++ */
++enum dpni_policer_mode {
++ DPNI_POLICER_MODE_NONE = 0,
++ DPNI_POLICER_MODE_PASS_THROUGH,
++ DPNI_POLICER_MODE_RFC_2698,
++ DPNI_POLICER_MODE_RFC_4115
++};
++
++/**
++ * enum dpni_policer_unit - DPNI policer units
++ * @DPNI_POLICER_UNIT_BYTES: bytes units
++ * @DPNI_POLICER_UNIT_FRAMES: frames units
++ */
++enum dpni_policer_unit {
++ DPNI_POLICER_UNIT_BYTES = 0,
++ DPNI_POLICER_UNIT_FRAMES
++};
++
++/**
++ * enum dpni_policer_color - selecting the policer color
++ * @DPNI_POLICER_COLOR_GREEN: Green color
++ * @DPNI_POLICER_COLOR_YELLOW: Yellow color
++ * @DPNI_POLICER_COLOR_RED: Red color
++ */
++enum dpni_policer_color {
++ DPNI_POLICER_COLOR_GREEN = 0,
++ DPNI_POLICER_COLOR_YELLOW,
++ DPNI_POLICER_COLOR_RED
++};
++
++/**
++ * struct dpni_rx_tc_policing_cfg - Policer configuration
++ * @options: Mask of available options; use 'DPNI_POLICER_OPT_<X>' values
++ * @mode: policer mode
++ * @default_color: For pass-through mode the policer re-colors with this
++ *	color any incoming packets. For color-aware non-pass-through mode, the
++ *	policer re-colors with this color all packets with FD[DROPP]>2.
++ * @units: Bytes or Packets
++ * @cir: Committed information rate (CIR) in Kbps or packets/second
++ * @cbs: Committed burst size (CBS) in bytes or packets
++ * @eir: Peak information rate (PIR, rfc2698) in Kbps or packets/second
++ * Excess information rate (EIR, rfc4115) in Kbps or packets/second
++ * @ebs: Peak burst size (PBS, rfc2698) in bytes or packets
++ * Excess burst size (EBS, rfc4115) in bytes or packets
++ */
++struct dpni_rx_tc_policing_cfg {
++ uint32_t options;
++ enum dpni_policer_mode mode;
++ enum dpni_policer_unit units;
++ enum dpni_policer_color default_color;
++ uint32_t cir;
++ uint32_t cbs;
++ uint32_t eir;
++ uint32_t ebs;
++};
++
++/**
++ * dpni_set_rx_tc_policing() - Set Rx traffic class policing configuration
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ * @tc_id: Traffic class selection (0-7)
++ * @cfg: Traffic class policing configuration
++ *
++ * Return: '0' on Success; error code otherwise.
++ */
++int dpni_set_rx_tc_policing(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t tc_id,
++ const struct dpni_rx_tc_policing_cfg *cfg);
++
++/**
++ * dpni_get_rx_tc_policing() - Get Rx traffic class policing configuration
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ * @tc_id: Traffic class selection (0-7)
++ * @cfg: Traffic class policing configuration
++ *
++ * Return: '0' on Success; error code otherwise.
++ */
++int dpni_get_rx_tc_policing(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t tc_id,
++ struct dpni_rx_tc_policing_cfg *cfg);
++
++/**
++ * enum dpni_congestion_unit - DPNI congestion units
++ * @DPNI_CONGESTION_UNIT_BYTES: bytes units
++ * @DPNI_CONGESTION_UNIT_FRAMES: frames units
++ */
++enum dpni_congestion_unit {
++ DPNI_CONGESTION_UNIT_BYTES = 0,
++ DPNI_CONGESTION_UNIT_FRAMES
++};
++
++/**
++ * enum dpni_early_drop_mode - DPNI early drop mode
++ * @DPNI_EARLY_DROP_MODE_NONE: early drop is disabled
++ * @DPNI_EARLY_DROP_MODE_TAIL: early drop in taildrop mode
++ * @DPNI_EARLY_DROP_MODE_WRED: early drop in WRED mode
++ */
++enum dpni_early_drop_mode {
++ DPNI_EARLY_DROP_MODE_NONE = 0,
++ DPNI_EARLY_DROP_MODE_TAIL,
++ DPNI_EARLY_DROP_MODE_WRED
++};
++
++/**
++ * struct dpni_wred_cfg - WRED configuration
++ * @max_threshold: maximum threshold at which packets may be discarded. Above
++ *	this threshold all packets are discarded; must be less than 2^39;
++ *	approximated to be expressed as (x+256)*2^(y-1) due to HW
++ *	implementation.
++ * @min_threshold: minimum threshold at which packets may be discarded
++ * @drop_probability: probability that a packet will be discarded (1-100,
++ * associated with the max_threshold).
++ */
++struct dpni_wred_cfg {
++ uint64_t max_threshold;
++ uint64_t min_threshold;
++ uint8_t drop_probability;
++};
++
++/**
++ * struct dpni_early_drop_cfg - early-drop configuration
++ * @mode: drop mode
++ * @units: units type
++ * @green: WRED - 'green' configuration
++ * @yellow: WRED - 'yellow' configuration
++ * @red: WRED - 'red' configuration
++ * @tail_drop_threshold: tail drop threshold
++ */
++struct dpni_early_drop_cfg {
++ enum dpni_early_drop_mode mode;
++ enum dpni_congestion_unit units;
++
++ struct dpni_wred_cfg green;
++ struct dpni_wred_cfg yellow;
++ struct dpni_wred_cfg red;
++
++ uint32_t tail_drop_threshold;
++};
++
++/**
++ * dpni_prepare_early_drop() - prepare an early-drop configuration.
++ * @cfg: Early-drop configuration
++ * @early_drop_buf: Zeroed 256 bytes of memory before mapping it to DMA
++ *
++ * This function has to be called before dpni_set_rx_tc_early_drop or
++ * dpni_set_tx_tc_early_drop
++ *
++ */
++void dpni_prepare_early_drop(const struct dpni_early_drop_cfg *cfg,
++ uint8_t *early_drop_buf);
++
++/**
++ * dpni_extract_early_drop() - extract the early drop configuration.
++ * @cfg: Early-drop configuration
++ * @early_drop_buf: Zeroed 256 bytes of memory before mapping it to DMA
++ *
++ * This function has to be called after dpni_get_rx_tc_early_drop or
++ * dpni_get_tx_tc_early_drop
++ *
++ */
++void dpni_extract_early_drop(struct dpni_early_drop_cfg *cfg,
++ const uint8_t *early_drop_buf);
++
++/**
++ * dpni_set_rx_tc_early_drop() - Set Rx traffic class early-drop configuration
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ * @tc_id: Traffic class selection (0-7)
++ * @early_drop_iova: I/O virtual address of 256 bytes DMA-able memory filled
++ * with the early-drop configuration by calling dpni_prepare_early_drop()
++ *
++ * warning: Before calling this function, call dpni_prepare_early_drop() to
++ * prepare the early_drop_iova parameter
++ *
++ * Return: '0' on Success; error code otherwise.
++ */
++int dpni_set_rx_tc_early_drop(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t tc_id,
++ uint64_t early_drop_iova);
++
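++/*
++ * Early-drop sketch (illustrative): program tail-drop on Rx traffic class 0.
++ * 'ed_buf' and 'ed_buf_iova' are an assumed zeroed 256-byte DMA-able buffer
++ * and its DMA address; the threshold value is a placeholder, and 'mc_io',
++ * 'token' and 'err' come from the caller's context.
++ *
++ *	struct dpni_early_drop_cfg ed_cfg = {0};
++ *
++ *	ed_cfg.mode = DPNI_EARLY_DROP_MODE_TAIL;
++ *	ed_cfg.units = DPNI_CONGESTION_UNIT_FRAMES;
++ *	ed_cfg.tail_drop_threshold = 512;
++ *	dpni_prepare_early_drop(&ed_cfg, ed_buf);
++ *	err = dpni_set_rx_tc_early_drop(mc_io, 0, token, 0, ed_buf_iova);
++ */
++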
++/**
++ * dpni_get_rx_tc_early_drop() - Get Rx traffic class early-drop configuration
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ * @tc_id: Traffic class selection (0-7)
++ * @early_drop_iova: I/O virtual address of 256 bytes DMA-able memory
++ *
++ * warning: After calling this function, call dpni_extract_early_drop() to
++ * get the early drop configuration
++ *
++ * Return: '0' on Success; error code otherwise.
++ */
++int dpni_get_rx_tc_early_drop(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t tc_id,
++ uint64_t early_drop_iova);
++
++/**
++ * dpni_set_tx_tc_early_drop() - Set Tx traffic class early-drop configuration
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ * @tc_id: Traffic class selection (0-7)
++ * @early_drop_iova: I/O virtual address of 256 bytes DMA-able memory filled
++ * with the early-drop configuration by calling dpni_prepare_early_drop()
++ *
++ * warning: Before calling this function, call dpni_prepare_early_drop() to
++ * prepare the early_drop_iova parameter
++ *
++ * Return: '0' on Success; error code otherwise.
++ */
++int dpni_set_tx_tc_early_drop(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t tc_id,
++ uint64_t early_drop_iova);
++
++/**
++ * dpni_get_tx_tc_early_drop() - Get Tx traffic class early-drop configuration
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ * @tc_id: Traffic class selection (0-7)
++ * @early_drop_iova: I/O virtual address of 256 bytes DMA-able memory
++ *
++ * warning: After calling this function, call dpni_extract_early_drop() to
++ * get the early drop configuration
++ *
++ * Return: '0' on Success; error code otherwise.
++ */
++int dpni_get_tx_tc_early_drop(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t tc_id,
++ uint64_t early_drop_iova);
++
++/**
++ * enum dpni_dest - DPNI destination types
++ * @DPNI_DEST_NONE: Unassigned destination; The queue is set in parked mode and
++ * does not generate FQDAN notifications; user is expected to
++ * dequeue from the queue based on polling or other user-defined
++ * method
++ * @DPNI_DEST_DPIO: The queue is set in schedule mode and generates FQDAN
++ * notifications to the specified DPIO; user is expected to dequeue
++ * from the queue only after notification is received
++ * @DPNI_DEST_DPCON: The queue is set in schedule mode and does not generate
++ * FQDAN notifications, but is connected to the specified DPCON
++ * object; user is expected to dequeue from the DPCON channel
++ */
++enum dpni_dest {
++ DPNI_DEST_NONE = 0,
++ DPNI_DEST_DPIO = 1,
++ DPNI_DEST_DPCON = 2
++};
++
++/**
++ * struct dpni_dest_cfg - Structure representing DPNI destination parameters
++ * @dest_type: Destination type
++ * @dest_id: Either DPIO ID or DPCON ID, depending on the destination type
++ * @priority: Priority selection within the DPIO or DPCON channel; valid values
++ * are 0-1 or 0-7, depending on the number of priorities in that
++ * channel; not relevant for 'DPNI_DEST_NONE' option
++ */
++struct dpni_dest_cfg {
++ enum dpni_dest dest_type;
++ int dest_id;
++ uint8_t priority;
++};
++
++/* DPNI congestion options */
++
++/**
++ * CSCN message is written to message_iova once entering a
++ * congestion state (see 'threshold_entry')
++ */
++#define DPNI_CONG_OPT_WRITE_MEM_ON_ENTER 0x00000001
++/**
++ * CSCN message is written to message_iova once exiting a
++ * congestion state (see 'threshold_exit')
++ */
++#define DPNI_CONG_OPT_WRITE_MEM_ON_EXIT 0x00000002
++/**
++ * CSCN write will attempt to allocate into a cache (coherent write);
++ * valid only if 'DPNI_CONG_OPT_WRITE_MEM_<X>' is selected
++ */
++#define DPNI_CONG_OPT_COHERENT_WRITE 0x00000004
++/**
++ * if 'dest_cfg.dest_type != DPNI_DEST_NONE' CSCN message is sent to
++ * DPIO/DPCON's WQ channel once entering a congestion state
++ * (see 'threshold_entry')
++ */
++#define DPNI_CONG_OPT_NOTIFY_DEST_ON_ENTER 0x00000008
++/**
++ * if 'dest_cfg.dest_type != DPNI_DEST_NONE' CSCN message is sent to
++ * DPIO/DPCON's WQ channel once exiting a congestion state
++ * (see 'threshold_exit')
++ */
++#define DPNI_CONG_OPT_NOTIFY_DEST_ON_EXIT 0x00000010
++/**
++ * if 'dest_cfg.dest_type != DPNI_DEST_NONE' when the CSCN is written to the
++ * sw-portal's DQRR, the DQRI interrupt is asserted immediately (if enabled)
++ */
++#define DPNI_CONG_OPT_INTR_COALESCING_DISABLED 0x00000020
++
++/**
++ * struct dpni_congestion_notification_cfg - congestion notification
++ * configuration
++ * @units: units type
++ * @threshold_entry: above this threshold we enter a congestion state.
++ * set it to '0' to disable it
++ * @threshold_exit: below this threshold we exit the congestion state.
++ * @message_ctx: The context that will be part of the CSCN message
++ * @message_iova: I/O virtual address (must be in DMA-able memory),
++ * must be 16B aligned; valid only if 'DPNI_CONG_OPT_WRITE_MEM_<X>' is
++ * contained in 'options'
++ * @dest_cfg: CSCN can be sent to either the DPIO or DPCON WQ channel
++ * @options: Mask of available options; use 'DPNI_CONG_OPT_<X>' values
++ */
++struct dpni_congestion_notification_cfg {
++ enum dpni_congestion_unit units;
++ uint32_t threshold_entry;
++ uint32_t threshold_exit;
++ uint64_t message_ctx;
++ uint64_t message_iova;
++ struct dpni_dest_cfg dest_cfg;
++ uint16_t options;
++};
++
++/**
++ * dpni_set_rx_tc_congestion_notification() - Set Rx traffic class congestion
++ * notification configuration
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ * @tc_id: Traffic class selection (0-7)
++ * @cfg: congestion notification configuration
++ *
++ * Return: '0' on Success; error code otherwise.
++ */
++int dpni_set_rx_tc_congestion_notification(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t tc_id,
++ const struct dpni_congestion_notification_cfg *cfg);
++
++/**
++ * dpni_get_rx_tc_congestion_notification() - Get Rx traffic class congestion
++ * notification configuration
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ * @tc_id: Traffic class selection (0-7)
++ * @cfg: congestion notification configuration
++ *
++ * Return: '0' on Success; error code otherwise.
++ */
++int dpni_get_rx_tc_congestion_notification(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t tc_id,
++ struct dpni_congestion_notification_cfg *cfg);
++
++/**
++ * dpni_set_tx_tc_congestion_notification() - Set Tx traffic class congestion
++ * notification configuration
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ * @tc_id: Traffic class selection (0-7)
++ * @cfg: congestion notification configuration
++ *
++ * Return: '0' on Success; error code otherwise.
++ */
++int dpni_set_tx_tc_congestion_notification(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t tc_id,
++ const struct dpni_congestion_notification_cfg *cfg);
++
++/**
++ * dpni_get_tx_tc_congestion_notification() - Get Tx traffic class congestion
++ * notification configuration
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ * @tc_id: Traffic class selection (0-7)
++ * @cfg: congestion notification configuration
++ *
++ * Return: '0' on Success; error code otherwise.
++ */
++int dpni_get_tx_tc_congestion_notification(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t tc_id,
++ struct dpni_congestion_notification_cfg *cfg);
++
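++/*
++ * Example (informative): a minimal sketch of enabling CSCN
++ * write-to-memory notifications for Rx traffic class 0. It assumes an
++ * already opened DPNI ('token'), a 16B-aligned DMA-able buffer at
++ * 'cscn_iova', and that the congestion-unit enumerator matches the one
++ * declared earlier in this header.
++ *
++ *	struct dpni_congestion_notification_cfg cn_cfg = { 0 };
++ *	int err;
++ *
++ *	cn_cfg.units = DPNI_CONGESTION_UNIT_FRAMES;
++ *	cn_cfg.threshold_entry = 1024;	// enter congestion at 1024 frames
++ *	cn_cfg.threshold_exit = 512;	// exit congestion at 512 frames
++ *	cn_cfg.message_iova = cscn_iova;
++ *	cn_cfg.message_ctx = 0;
++ *	cn_cfg.dest_cfg.dest_type = DPNI_DEST_NONE;
++ *	cn_cfg.options = DPNI_CONG_OPT_WRITE_MEM_ON_ENTER |
++ *			 DPNI_CONG_OPT_WRITE_MEM_ON_EXIT;
++ *
++ *	err = dpni_set_rx_tc_congestion_notification(mc_io, CMD_PRI_LOW,
++ *						      token, 0, &cn_cfg);
++ */
++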
++/**
++ * enum dpni_flc_type - DPNI FLC types
++ * @DPNI_FLC_USER_DEFINED: select the FLC to be used for user defined value
++ * @DPNI_FLC_STASH: select the FLC to be used for stash control
++ */
++enum dpni_flc_type {
++ DPNI_FLC_USER_DEFINED = 0,
++ DPNI_FLC_STASH = 1,
++};
++
++/**
++ * enum dpni_stash_size - DPNI FLC stashing size
++ * @DPNI_STASH_SIZE_0B: no stash
++ * @DPNI_STASH_SIZE_64B: stashes 64 bytes
++ * @DPNI_STASH_SIZE_128B: stashes 128 bytes
++ * @DPNI_STASH_SIZE_192B: stashes 192 bytes
++ */
++enum dpni_stash_size {
++ DPNI_STASH_SIZE_0B = 0,
++ DPNI_STASH_SIZE_64B = 1,
++ DPNI_STASH_SIZE_128B = 2,
++ DPNI_STASH_SIZE_192B = 3,
++};
++
++/* DPNI FLC stash options */
++
++/**
++ * stashes the whole annotation area (up to 192 bytes)
++ */
++#define DPNI_FLC_STASH_FRAME_ANNOTATION 0x00000001
++
++/**
++ * struct dpni_flc_cfg - Structure representing DPNI FLC configuration
++ * @flc_type: FLC type
++ * @options: Mask of available options;
++ * use 'DPNI_FLC_STASH_<X>' values
++ * @frame_data_size: Size of frame data to be stashed
++ * @flow_context_size: Size of flow context to be stashed
++ * @flow_context: 1. In case flc_type is 'DPNI_FLC_USER_DEFINED':
++ * this value will be provided in the frame descriptor
++ * (FD[FLC])
++ * 2. In case flc_type is 'DPNI_FLC_STASH':
++ * this value will be I/O virtual address of the
++ * flow-context;
++ * Must be cacheline-aligned and DMA-able memory
++ */
++struct dpni_flc_cfg {
++ enum dpni_flc_type flc_type;
++ uint32_t options;
++ enum dpni_stash_size frame_data_size;
++ enum dpni_stash_size flow_context_size;
++ uint64_t flow_context;
++};
++
++/**
++ * DPNI queue modification options
++ */
++
++/**
++ * Select to modify the user's context associated with the queue
++ */
++#define DPNI_QUEUE_OPT_USER_CTX 0x00000001
++/**
++ * Select to modify the queue's destination
++ */
++#define DPNI_QUEUE_OPT_DEST 0x00000002
++/** Select to modify the flow-context parameters;
++ * not applicable for Tx-conf/Err queues as the FD comes from the user
++ */
++#define DPNI_QUEUE_OPT_FLC 0x00000004
++/**
++ * Select to modify the queue's order preservation
++ */
++#define DPNI_QUEUE_OPT_ORDER_PRESERVATION 0x00000008
++/**
++ * Select to modify the queue's tail-drop threshold
++ */
++#define DPNI_QUEUE_OPT_TAILDROP_THRESHOLD 0x00000010
++
++/**
++ * struct dpni_queue_cfg - Structure representing queue configuration
++ * @options: Flags representing the suggested modifications to the queue;
++ * Use any combination of 'DPNI_QUEUE_OPT_<X>' flags
++ * @user_ctx: User context value provided in the frame descriptor of each
++ * dequeued frame; valid only if 'DPNI_QUEUE_OPT_USER_CTX'
++ * is contained in 'options'
++ * @dest_cfg: Queue destination parameters;
++ * valid only if 'DPNI_QUEUE_OPT_DEST' is contained in 'options'
++ * @flc_cfg: Flow context configuration; in case the TC's distribution
++ * is either NONE or HASH, the FLC settings of flow #0 are used;
++ * in the case of FS (flow-steering), the flow's own FLC settings
++ * are used;
++ * valid only if 'DPNI_QUEUE_OPT_FLC' is contained in 'options'
++ * @order_preservation_en: enable/disable order preservation;
++ * valid only if 'DPNI_QUEUE_OPT_ORDER_PRESERVATION' is contained
++ * in 'options'
++ * @tail_drop_threshold: set the queue's tail drop threshold in bytes;
++ * a value of '0' disables the threshold; maximum value is 0xE000000;
++ * valid only if 'DPNI_QUEUE_OPT_TAILDROP_THRESHOLD' is contained
++ * in 'options'
++ */
++struct dpni_queue_cfg {
++ uint32_t options;
++ uint64_t user_ctx;
++ struct dpni_dest_cfg dest_cfg;
++ struct dpni_flc_cfg flc_cfg;
++ int order_preservation_en;
++ uint32_t tail_drop_threshold;
++};
++
++/**
++ * struct dpni_queue_attr - Structure representing queue attributes
++ * @user_ctx: User context value provided in the frame descriptor of each
++ * dequeued frame
++ * @dest_cfg: Queue destination configuration
++ * @flc_cfg: Flow context configuration
++ * @order_preservation_en: enable/disable order preservation
++ * @tail_drop_threshold: queue's tail drop threshold in bytes;
++ * @fqid: Virtual fqid value to be used for dequeue operations
++ */
++struct dpni_queue_attr {
++ uint64_t user_ctx;
++ struct dpni_dest_cfg dest_cfg;
++ struct dpni_flc_cfg flc_cfg;
++ int order_preservation_en;
++ uint32_t tail_drop_threshold;
++
++ uint32_t fqid;
++};
++
++/**
++ * DPNI Tx flow modification options
++ */
++
++/**
++ * Select to modify the settings for the dedicated Tx confirmation/error queue
++ */
++#define DPNI_TX_FLOW_OPT_TX_CONF_ERROR 0x00000001
++/**
++ * Select to modify the L3 checksum generation setting
++ */
++#define DPNI_TX_FLOW_OPT_L3_CHKSUM_GEN 0x00000010
++/**
++ * Select to modify the L4 checksum generation setting
++ */
++#define DPNI_TX_FLOW_OPT_L4_CHKSUM_GEN 0x00000020
++
++/**
++ * struct dpni_tx_flow_cfg - Structure representing Tx flow configuration
++ * @options: Flags representing the suggested modifications to the Tx flow;
++ * Use any combination of 'DPNI_TX_FLOW_OPT_<X>' flags
++ * @use_common_tx_conf_queue: Set to '1' to use the common (default) Tx
++ * confirmation and error queue; Set to '0' to use the private
++ * Tx confirmation and error queue; valid only if
++ * 'DPNI_OPT_PRIVATE_TX_CONF_ERROR_DISABLED' wasn't set at DPNI creation
++ * and 'DPNI_TX_FLOW_OPT_TX_CONF_ERROR' is contained in 'options'
++ * @l3_chksum_gen: Set to '1' to enable L3 checksum generation; '0' to disable;
++ * valid only if 'DPNI_TX_FLOW_OPT_L3_CHKSUM_GEN' is contained in 'options'
++ * @l4_chksum_gen: Set to '1' to enable L4 checksum generation; '0' to disable;
++ * valid only if 'DPNI_TX_FLOW_OPT_L4_CHKSUM_GEN' is contained in 'options'
++ */
++struct dpni_tx_flow_cfg {
++ uint32_t options;
++ int use_common_tx_conf_queue;
++ int l3_chksum_gen;
++ int l4_chksum_gen;
++};
++
++/**
++ * dpni_set_tx_flow() - Set Tx flow configuration
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ * @flow_id: Provides (or returns) the sender's flow ID;
++ * for each new sender set (*flow_id) to 'DPNI_NEW_FLOW_ID' to generate
++ * a new flow_id; this ID should be used as the QDBIN argument
++ * in enqueue operations
++ * @cfg: Tx flow configuration
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpni_set_tx_flow(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint16_t *flow_id,
++ const struct dpni_tx_flow_cfg *cfg);
++
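++/*
++ * Example (informative): a minimal sketch of allocating a new Tx flow
++ * with hardware L3/L4 checksum generation. It assumes an already opened
++ * DPNI ('token'); error handling is omitted.
++ *
++ *	struct dpni_tx_flow_cfg tx_cfg = { 0 };
++ *	uint16_t flow_id = DPNI_NEW_FLOW_ID;
++ *	int err;
++ *
++ *	tx_cfg.options = DPNI_TX_FLOW_OPT_L3_CHKSUM_GEN |
++ *			 DPNI_TX_FLOW_OPT_L4_CHKSUM_GEN;
++ *	tx_cfg.l3_chksum_gen = 1;
++ *	tx_cfg.l4_chksum_gen = 1;
++ *
++ *	err = dpni_set_tx_flow(mc_io, CMD_PRI_LOW, token, &flow_id, &tx_cfg);
++ *	// on success, 'flow_id' now holds the id to use as the QDBIN
++ *	// argument in enqueue operations
++ */
++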
++/**
++ * struct dpni_tx_flow_attr - Structure representing Tx flow attributes
++ * @use_common_tx_conf_queue: '1' if using common (default) Tx confirmation and
++ * error queue; '0' if using private Tx confirmation and error queue
++ * @l3_chksum_gen: '1' if L3 checksum generation is enabled; '0' if disabled
++ * @l4_chksum_gen: '1' if L4 checksum generation is enabled; '0' if disabled
++ */
++struct dpni_tx_flow_attr {
++ int use_common_tx_conf_queue;
++ int l3_chksum_gen;
++ int l4_chksum_gen;
++};
++
++/**
++ * dpni_get_tx_flow() - Get Tx flow attributes
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ * @flow_id: The sender's flow ID, as returned by the
++ * dpni_set_tx_flow() function
++ * @attr: Returned Tx flow attributes
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpni_get_tx_flow(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint16_t flow_id,
++ struct dpni_tx_flow_attr *attr);
++
++/**
++ * struct dpni_tx_conf_cfg - Structure representing Tx conf configuration
++ * @errors_only: Set to '1' to report back only error frames;
++ * Set to '0' to confirm transmission/error for all transmitted frames;
++ * @queue_cfg: Queue configuration
++ */
++struct dpni_tx_conf_cfg {
++ int errors_only;
++ struct dpni_queue_cfg queue_cfg;
++};
++
++/**
++ * dpni_set_tx_conf() - Set Tx confirmation and error queue configuration
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ * @flow_id: The sender's flow ID, as returned by the
++ * dpni_set_tx_flow() function;
++ * use 'DPNI_COMMON_TX_CONF' for common tx-conf
++ * @cfg: Queue configuration
++ *
++ * If either 'DPNI_OPT_TX_CONF_DISABLED' or
++ * 'DPNI_OPT_PRIVATE_TX_CONF_ERROR_DISABLED' were selected at DPNI creation,
++ * this function can ONLY be used with 'flow_id == DPNI_COMMON_TX_CONF';
++ * i.e. it can only serve the common tx-conf-err queue;
++ * if 'DPNI_OPT_TX_CONF_DISABLED' was selected, only error frames are reported
++ * back - successfully transmitted frames are not confirmed. Otherwise, all
++ * transmitted frames are sent for confirmation.
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpni_set_tx_conf(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint16_t flow_id,
++ const struct dpni_tx_conf_cfg *cfg);
++
++/**
++ * struct dpni_tx_conf_attr - Structure representing Tx conf attributes
++ * @errors_only: '1' if only error frames are reported back; '0' if all
++ * transmitted frames are confirmed
++ * @queue_attr: Queue attributes
++ */
++struct dpni_tx_conf_attr {
++ int errors_only;
++ struct dpni_queue_attr queue_attr;
++};
++
++/**
++ * dpni_get_tx_conf() - Get Tx confirmation and error queue attributes
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ * @flow_id: The sender's flow ID, as returned by the
++ * dpni_set_tx_flow() function;
++ * use 'DPNI_COMMON_TX_CONF' for common tx-conf
++ * @attr: Returned tx-conf attributes
++ *
++ * If either 'DPNI_OPT_TX_CONF_DISABLED' or
++ * 'DPNI_OPT_PRIVATE_TX_CONF_ERROR_DISABLED' were selected at DPNI creation,
++ * this function can ONLY be used with 'flow_id == DPNI_COMMON_TX_CONF';
++ * i.e. it can only serve the common tx-conf-err queue;
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpni_get_tx_conf(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint16_t flow_id,
++ struct dpni_tx_conf_attr *attr);
++
++/**
++ * dpni_set_tx_conf_congestion_notification() - Set Tx conf congestion
++ * notification configuration
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ * @flow_id: The sender's flow ID, as returned by the
++ * dpni_set_tx_flow() function;
++ * use 'DPNI_COMMON_TX_CONF' for common tx-conf
++ * @cfg: congestion notification configuration
++ *
++ * If either 'DPNI_OPT_TX_CONF_DISABLED' or
++ * 'DPNI_OPT_PRIVATE_TX_CONF_ERROR_DISABLED' were selected at DPNI creation,
++ * this function can ONLY be used with 'flow_id == DPNI_COMMON_TX_CONF';
++ * i.e. it can only serve the common tx-conf-err queue;
++ *
++ * Return: '0' on Success; error code otherwise.
++ */
++int dpni_set_tx_conf_congestion_notification(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint16_t flow_id,
++ const struct dpni_congestion_notification_cfg *cfg);
++
++/**
++ * dpni_get_tx_conf_congestion_notification() - Get Tx conf congestion
++ * notification configuration
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ * @flow_id: The sender's flow ID, as returned by the
++ * dpni_set_tx_flow() function;
++ * use 'DPNI_COMMON_TX_CONF' for common tx-conf
++ * @cfg: Returned congestion notification configuration
++ *
++ * If either 'DPNI_OPT_TX_CONF_DISABLED' or
++ * 'DPNI_OPT_PRIVATE_TX_CONF_ERROR_DISABLED' were selected at DPNI creation,
++ * this function can ONLY be used with 'flow_id == DPNI_COMMON_TX_CONF';
++ * i.e. it can only serve the common tx-conf-err queue;
++ *
++ * Return: '0' on Success; error code otherwise.
++ */
++int dpni_get_tx_conf_congestion_notification(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint16_t flow_id,
++ struct dpni_congestion_notification_cfg *cfg);
++
++/**
++ * dpni_set_tx_conf_revoke() - Tx confirmation revocation
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ * @revoke: revoke or not
++ *
++ * This function is useful only when 'DPNI_OPT_TX_CONF_DISABLED' is not
++ * selected at DPNI creation.
++ * Calling this function with 'revoke' set to '1' disables all transmit
++ * confirmation (including the private confirmation queues), regardless of
++ * previous settings; Note that in this case, Tx error frames are still
++ * enqueued to the general transmit errors queue.
++ * Calling this function with 'revoke' set to '0' restores the previous
++ * settings for both general and private transmit confirmation.
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpni_set_tx_conf_revoke(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ int revoke);
++
++/**
++ * dpni_set_rx_flow() - Set Rx flow configuration
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ * @tc_id: Traffic class selection (0-7);
++ * use 'DPNI_ALL_TCS' to set all TCs and all flows
++ * @flow_id: Rx flow id within the traffic class; use
++ * 'DPNI_ALL_TC_FLOWS' to set all flows within
++ * this tc_id; ignored if tc_id is set to
++ * 'DPNI_ALL_TCS';
++ * @cfg: Rx flow configuration
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpni_set_rx_flow(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t tc_id,
++ uint16_t flow_id,
++ const struct dpni_queue_cfg *cfg);
++
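++/*
++ * Example (informative): a minimal sketch of steering all Rx flows of
++ * traffic class 0 to a DPIO channel and setting a tail-drop threshold.
++ * It assumes an already opened DPNI ('token'); 'dpio_id' and 'rx_ctx'
++ * are placeholder names supplied by the caller.
++ *
++ *	struct dpni_queue_cfg q_cfg = { 0 };
++ *	int err;
++ *
++ *	q_cfg.options = DPNI_QUEUE_OPT_USER_CTX | DPNI_QUEUE_OPT_DEST |
++ *			DPNI_QUEUE_OPT_TAILDROP_THRESHOLD;
++ *	q_cfg.user_ctx = (uint64_t)rx_ctx;	// returned in each dequeued FD
++ *	q_cfg.dest_cfg.dest_type = DPNI_DEST_DPIO;
++ *	q_cfg.dest_cfg.dest_id = dpio_id;
++ *	q_cfg.dest_cfg.priority = 0;
++ *	q_cfg.tail_drop_threshold = 64 * 1024;	// 64 KiB
++ *
++ *	err = dpni_set_rx_flow(mc_io, CMD_PRI_LOW, token, 0,
++ *			       DPNI_ALL_TC_FLOWS, &q_cfg);
++ */
++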
++/**
++ * dpni_get_rx_flow() - Get Rx flow attributes
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ * @tc_id: Traffic class selection (0-7)
++ * @flow_id: Rx flow id within the traffic class
++ * @attr: Returned Rx flow attributes
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpni_get_rx_flow(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t tc_id,
++ uint16_t flow_id,
++ struct dpni_queue_attr *attr);
++
++/**
++ * dpni_set_rx_err_queue() - Set Rx error queue configuration
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ * @cfg: Queue configuration
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpni_set_rx_err_queue(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ const struct dpni_queue_cfg *cfg);
++
++/**
++ * dpni_get_rx_err_queue() - Get Rx error queue attributes
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ * @attr: Returned Queue attributes
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpni_get_rx_err_queue(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ struct dpni_queue_attr *attr);
++
++/**
++ * struct dpni_qos_tbl_cfg - Structure representing QOS table configuration
++ * @key_cfg_iova: I/O virtual address of 256 bytes DMA-able memory filled with
++ * key extractions to be used as the QoS criteria by calling
++ * dpni_prepare_key_cfg()
++ * @discard_on_miss: Set to '1' to discard frames in case of no match (miss);
++ * '0' to use the 'default_tc' in such cases
++ * @default_tc: Used in case of no-match and 'discard_on_miss' = 0
++ */
++struct dpni_qos_tbl_cfg {
++ uint64_t key_cfg_iova;
++ int discard_on_miss;
++ uint8_t default_tc;
++};
++
++/**
++ * dpni_set_qos_table() - Set QoS mapping table
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ * @cfg: QoS table configuration
++ *
++ * This function and all QoS-related functions require that
++ * 'max_tcs > 1' was set at DPNI creation.
++ *
++ * warning: Before calling this function, call dpni_prepare_key_cfg() to
++ * prepare the key_cfg_iova parameter
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpni_set_qos_table(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ const struct dpni_qos_tbl_cfg *cfg);
++
++/**
++ * struct dpni_rule_cfg - Rule configuration for table lookup
++ * @key_iova: I/O virtual address of the key (must be in DMA-able memory)
++ * @mask_iova: I/O virtual address of the mask (must be in DMA-able memory)
++ * @key_size: key and mask size (in bytes)
++ */
++struct dpni_rule_cfg {
++ uint64_t key_iova;
++ uint64_t mask_iova;
++ uint8_t key_size;
++};
++
++/**
++ * dpni_add_qos_entry() - Add QoS mapping entry (to select a traffic class)
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ * @cfg: QoS rule to add
++ * @tc_id: Traffic class selection (0-7)
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpni_add_qos_entry(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ const struct dpni_rule_cfg *cfg,
++ uint8_t tc_id);
++
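++/*
++ * Example (informative): a minimal sketch of installing a QoS table and
++ * one classification rule. It assumes 'max_tcs > 1', that the key-extract
++ * configuration was serialized into a DMA-able buffer at 'key_cfg_iova'
++ * by dpni_prepare_key_cfg(), and that 'key_iova'/'mask_iova' point to a
++ * prepared key/mask pair of 'key_size' bytes (placeholder names).
++ *
++ *	struct dpni_qos_tbl_cfg qos_cfg = { 0 };
++ *	struct dpni_rule_cfg rule = { 0 };
++ *	int err;
++ *
++ *	qos_cfg.key_cfg_iova = key_cfg_iova;
++ *	qos_cfg.discard_on_miss = 0;	// no match -> default_tc
++ *	qos_cfg.default_tc = 0;
++ *	err = dpni_set_qos_table(mc_io, CMD_PRI_LOW, token, &qos_cfg);
++ *
++ *	rule.key_iova = key_iova;
++ *	rule.mask_iova = mask_iova;
++ *	rule.key_size = key_size;
++ *	err = dpni_add_qos_entry(mc_io, CMD_PRI_LOW, token, &rule, 1);
++ */
++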
++/**
++ * dpni_remove_qos_entry() - Remove QoS mapping entry
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ * @cfg: QoS rule to remove
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpni_remove_qos_entry(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ const struct dpni_rule_cfg *cfg);
++
++/**
++ * dpni_clear_qos_table() - Clear all QoS mapping entries
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ *
++ * Following this function call, all frames are directed to
++ * the default traffic class (0)
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpni_clear_qos_table(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token);
++
++/**
++ * dpni_add_fs_entry() - Add Flow Steering entry for a specific traffic class
++ * (to select a flow ID)
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ * @tc_id: Traffic class selection (0-7)
++ * @cfg: Flow steering rule to add
++ * @flow_id: Flow id selection (must be smaller than the
++ * distribution size of the traffic class)
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpni_add_fs_entry(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t tc_id,
++ const struct dpni_rule_cfg *cfg,
++ uint16_t flow_id);
++
++/**
++ * dpni_remove_fs_entry() - Remove Flow Steering entry from a specific
++ * traffic class
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ * @tc_id: Traffic class selection (0-7)
++ * @cfg: Flow steering rule to remove
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpni_remove_fs_entry(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t tc_id,
++ const struct dpni_rule_cfg *cfg);
++
++/**
++ * dpni_clear_fs_entries() - Clear all Flow Steering entries of a specific
++ * traffic class
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ * @tc_id: Traffic class selection (0-7)
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpni_clear_fs_entries(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t tc_id);
++
++/**
++ * dpni_set_vlan_insertion() - Enable/disable VLAN insertion for egress frames
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ * @en: Set to '1' to enable; '0' to disable
++ *
++ * Requires that the 'DPNI_OPT_VLAN_MANIPULATION' option is set
++ * at DPNI creation.
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpni_set_vlan_insertion(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ int en);
++
++/**
++ * dpni_set_vlan_removal() - Enable/disable VLAN removal for ingress frames
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ * @en: Set to '1' to enable; '0' to disable
++ *
++ * Requires that the 'DPNI_OPT_VLAN_MANIPULATION' option is set
++ * at DPNI creation.
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpni_set_vlan_removal(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ int en);
++
++/**
++ * dpni_set_ipr() - Enable/disable IP reassembly of ingress frames
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ * @en: Set to '1' to enable; '0' to disable
++ *
++ * Requires that the 'DPNI_OPT_IPR' option is set at DPNI creation.
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpni_set_ipr(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ int en);
++
++/**
++ * dpni_set_ipf() - Enable/disable IP fragmentation of egress frames
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ * @en: Set to '1' to enable; '0' to disable
++ *
++ * Requires that the 'DPNI_OPT_IPF' option is set at DPNI
++ * creation. Fragmentation is performed according to MTU value
++ * set by dpni_set_mtu() function
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpni_set_ipf(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ int en);
++
++#endif /* __FSL_DPNI_H */
+diff --git a/drivers/net/dpaa2/mc/fsl_dpni_cmd.h b/drivers/net/dpaa2/mc/fsl_dpni_cmd.h
+new file mode 100644
+index 0000000..c0f8af0
+--- /dev/null
++++ b/drivers/net/dpaa2/mc/fsl_dpni_cmd.h
+@@ -0,0 +1,1058 @@
++/* Copyright 2013-2015 Freescale Semiconductor Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of the above-listed copyright holders nor the
++ * names of any contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
++ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
++ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
++ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
++ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
++ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
++ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
++ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
++ * POSSIBILITY OF SUCH DAMAGE.
++ */
++#ifndef _FSL_DPNI_CMD_H
++#define _FSL_DPNI_CMD_H
++
++/* DPNI Version */
++#define DPNI_VER_MAJOR 6
++#define DPNI_VER_MINOR 0
++
++/* Command IDs */
++#define DPNI_CMDID_OPEN 0x801
++#define DPNI_CMDID_CLOSE 0x800
++#define DPNI_CMDID_CREATE 0x901
++#define DPNI_CMDID_DESTROY 0x900
++
++#define DPNI_CMDID_ENABLE 0x002
++#define DPNI_CMDID_DISABLE 0x003
++#define DPNI_CMDID_GET_ATTR 0x004
++#define DPNI_CMDID_RESET 0x005
++#define DPNI_CMDID_IS_ENABLED 0x006
++
++#define DPNI_CMDID_SET_IRQ 0x010
++#define DPNI_CMDID_GET_IRQ 0x011
++#define DPNI_CMDID_SET_IRQ_ENABLE 0x012
++#define DPNI_CMDID_GET_IRQ_ENABLE 0x013
++#define DPNI_CMDID_SET_IRQ_MASK 0x014
++#define DPNI_CMDID_GET_IRQ_MASK 0x015
++#define DPNI_CMDID_GET_IRQ_STATUS 0x016
++#define DPNI_CMDID_CLEAR_IRQ_STATUS 0x017
++
++#define DPNI_CMDID_SET_POOLS 0x200
++#define DPNI_CMDID_GET_RX_BUFFER_LAYOUT 0x201
++#define DPNI_CMDID_SET_RX_BUFFER_LAYOUT 0x202
++#define DPNI_CMDID_GET_TX_BUFFER_LAYOUT 0x203
++#define DPNI_CMDID_SET_TX_BUFFER_LAYOUT 0x204
++#define DPNI_CMDID_SET_TX_CONF_BUFFER_LAYOUT 0x205
++#define DPNI_CMDID_GET_TX_CONF_BUFFER_LAYOUT 0x206
++#define DPNI_CMDID_SET_L3_CHKSUM_VALIDATION 0x207
++#define DPNI_CMDID_GET_L3_CHKSUM_VALIDATION 0x208
++#define DPNI_CMDID_SET_L4_CHKSUM_VALIDATION 0x209
++#define DPNI_CMDID_GET_L4_CHKSUM_VALIDATION 0x20A
++#define DPNI_CMDID_SET_ERRORS_BEHAVIOR 0x20B
++#define DPNI_CMDID_SET_TX_CONF_REVOKE 0x20C
++
++#define DPNI_CMDID_GET_QDID 0x210
++#define DPNI_CMDID_GET_SP_INFO 0x211
++#define DPNI_CMDID_GET_TX_DATA_OFFSET 0x212
++#define DPNI_CMDID_GET_COUNTER 0x213
++#define DPNI_CMDID_SET_COUNTER 0x214
++#define DPNI_CMDID_GET_LINK_STATE 0x215
++#define DPNI_CMDID_SET_MAX_FRAME_LENGTH 0x216
++#define DPNI_CMDID_GET_MAX_FRAME_LENGTH 0x217
++#define DPNI_CMDID_SET_MTU 0x218
++#define DPNI_CMDID_GET_MTU 0x219
++#define DPNI_CMDID_SET_LINK_CFG 0x21A
++#define DPNI_CMDID_SET_TX_SHAPING 0x21B
++
++#define DPNI_CMDID_SET_MCAST_PROMISC 0x220
++#define DPNI_CMDID_GET_MCAST_PROMISC 0x221
++#define DPNI_CMDID_SET_UNICAST_PROMISC 0x222
++#define DPNI_CMDID_GET_UNICAST_PROMISC 0x223
++#define DPNI_CMDID_SET_PRIM_MAC 0x224
++#define DPNI_CMDID_GET_PRIM_MAC 0x225
++#define DPNI_CMDID_ADD_MAC_ADDR 0x226
++#define DPNI_CMDID_REMOVE_MAC_ADDR 0x227
++#define DPNI_CMDID_CLR_MAC_FILTERS 0x228
++
++#define DPNI_CMDID_SET_VLAN_FILTERS 0x230
++#define DPNI_CMDID_ADD_VLAN_ID 0x231
++#define DPNI_CMDID_REMOVE_VLAN_ID 0x232
++#define DPNI_CMDID_CLR_VLAN_FILTERS 0x233
++
++#define DPNI_CMDID_SET_RX_TC_DIST 0x235
++#define DPNI_CMDID_SET_TX_FLOW 0x236
++#define DPNI_CMDID_GET_TX_FLOW 0x237
++#define DPNI_CMDID_SET_RX_FLOW 0x238
++#define DPNI_CMDID_GET_RX_FLOW 0x239
++#define DPNI_CMDID_SET_RX_ERR_QUEUE 0x23A
++#define DPNI_CMDID_GET_RX_ERR_QUEUE 0x23B
++
++#define DPNI_CMDID_SET_RX_TC_POLICING 0x23E
++#define DPNI_CMDID_SET_RX_TC_EARLY_DROP 0x23F
++
++#define DPNI_CMDID_SET_QOS_TBL 0x240
++#define DPNI_CMDID_ADD_QOS_ENT 0x241
++#define DPNI_CMDID_REMOVE_QOS_ENT 0x242
++#define DPNI_CMDID_CLR_QOS_TBL 0x243
++#define DPNI_CMDID_ADD_FS_ENT 0x244
++#define DPNI_CMDID_REMOVE_FS_ENT 0x245
++#define DPNI_CMDID_CLR_FS_ENT 0x246
++#define DPNI_CMDID_SET_VLAN_INSERTION 0x247
++#define DPNI_CMDID_SET_VLAN_REMOVAL 0x248
++#define DPNI_CMDID_SET_IPR 0x249
++#define DPNI_CMDID_SET_IPF 0x24A
++
++#define DPNI_CMDID_SET_TX_SELECTION 0x250
++#define DPNI_CMDID_GET_RX_TC_POLICING 0x251
++#define DPNI_CMDID_GET_RX_TC_EARLY_DROP 0x252
++#define DPNI_CMDID_SET_RX_TC_CONGESTION_NOTIFICATION 0x253
++#define DPNI_CMDID_GET_RX_TC_CONGESTION_NOTIFICATION 0x254
++#define DPNI_CMDID_SET_TX_TC_CONGESTION_NOTIFICATION 0x255
++#define DPNI_CMDID_GET_TX_TC_CONGESTION_NOTIFICATION 0x256
++#define DPNI_CMDID_SET_TX_CONF 0x257
++#define DPNI_CMDID_GET_TX_CONF 0x258
++#define DPNI_CMDID_SET_TX_CONF_CONGESTION_NOTIFICATION 0x259
++#define DPNI_CMDID_GET_TX_CONF_CONGESTION_NOTIFICATION 0x25A
++#define DPNI_CMDID_SET_TX_TC_EARLY_DROP 0x25B
++#define DPNI_CMDID_GET_TX_TC_EARLY_DROP 0x25C
++
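++/*
++ * Note (informative): the CMD/RSP macros below only serialize arguments
++ * into (or extract results from) the 64-bit command parameters exchanged
++ * with the Management Complex. The corresponding functions in dpni.c
++ * follow the usual MC flib pattern, roughly:
++ *
++ *	struct mc_command cmd = { 0 };
++ *
++ *	cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_MTU,
++ *					  cmd_flags, token);
++ *	DPNI_CMD_SET_MTU(cmd, mtu);
++ *
++ *	return mc_send_command(mc_io, &cmd);
++ */
++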
++/* cmd, param, offset, width, type, arg_name */
++#define DPNI_CMD_OPEN(cmd, dpni_id) \
++ MC_CMD_OP(cmd, 0, 0, 32, int, dpni_id)
++
++#define DPNI_PREP_EXTENDED_CFG(ext, cfg) \
++do { \
++ MC_PREP_OP(ext, 0, 0, 16, uint16_t, cfg->tc_cfg[0].max_dist); \
++ MC_PREP_OP(ext, 0, 16, 16, uint16_t, cfg->tc_cfg[0].max_fs_entries); \
++ MC_PREP_OP(ext, 0, 32, 16, uint16_t, cfg->tc_cfg[1].max_dist); \
++ MC_PREP_OP(ext, 0, 48, 16, uint16_t, cfg->tc_cfg[1].max_fs_entries); \
++ MC_PREP_OP(ext, 1, 0, 16, uint16_t, cfg->tc_cfg[2].max_dist); \
++ MC_PREP_OP(ext, 1, 16, 16, uint16_t, cfg->tc_cfg[2].max_fs_entries); \
++ MC_PREP_OP(ext, 1, 32, 16, uint16_t, cfg->tc_cfg[3].max_dist); \
++ MC_PREP_OP(ext, 1, 48, 16, uint16_t, cfg->tc_cfg[3].max_fs_entries); \
++ MC_PREP_OP(ext, 2, 0, 16, uint16_t, cfg->tc_cfg[4].max_dist); \
++ MC_PREP_OP(ext, 2, 16, 16, uint16_t, cfg->tc_cfg[4].max_fs_entries); \
++ MC_PREP_OP(ext, 2, 32, 16, uint16_t, cfg->tc_cfg[5].max_dist); \
++ MC_PREP_OP(ext, 2, 48, 16, uint16_t, cfg->tc_cfg[5].max_fs_entries); \
++ MC_PREP_OP(ext, 3, 0, 16, uint16_t, cfg->tc_cfg[6].max_dist); \
++ MC_PREP_OP(ext, 3, 16, 16, uint16_t, cfg->tc_cfg[6].max_fs_entries); \
++ MC_PREP_OP(ext, 3, 32, 16, uint16_t, cfg->tc_cfg[7].max_dist); \
++ MC_PREP_OP(ext, 3, 48, 16, uint16_t, cfg->tc_cfg[7].max_fs_entries); \
++ MC_PREP_OP(ext, 4, 0, 16, uint16_t, \
++ cfg->ipr_cfg.max_open_frames_ipv4); \
++ MC_PREP_OP(ext, 4, 16, 16, uint16_t, \
++ cfg->ipr_cfg.max_open_frames_ipv6); \
++ MC_PREP_OP(ext, 4, 32, 16, uint16_t, \
++ cfg->ipr_cfg.max_reass_frm_size); \
++ MC_PREP_OP(ext, 5, 0, 16, uint16_t, \
++ cfg->ipr_cfg.min_frag_size_ipv4); \
++ MC_PREP_OP(ext, 5, 16, 16, uint16_t, \
++ cfg->ipr_cfg.min_frag_size_ipv6); \
++} while (0)
++
++#define DPNI_EXT_EXTENDED_CFG(ext, cfg) \
++do { \
++ MC_EXT_OP(ext, 0, 0, 16, uint16_t, cfg->tc_cfg[0].max_dist); \
++ MC_EXT_OP(ext, 0, 16, 16, uint16_t, cfg->tc_cfg[0].max_fs_entries); \
++ MC_EXT_OP(ext, 0, 32, 16, uint16_t, cfg->tc_cfg[1].max_dist); \
++ MC_EXT_OP(ext, 0, 48, 16, uint16_t, cfg->tc_cfg[1].max_fs_entries); \
++ MC_EXT_OP(ext, 1, 0, 16, uint16_t, cfg->tc_cfg[2].max_dist); \
++ MC_EXT_OP(ext, 1, 16, 16, uint16_t, cfg->tc_cfg[2].max_fs_entries); \
++ MC_EXT_OP(ext, 1, 32, 16, uint16_t, cfg->tc_cfg[3].max_dist); \
++ MC_EXT_OP(ext, 1, 48, 16, uint16_t, cfg->tc_cfg[3].max_fs_entries); \
++ MC_EXT_OP(ext, 2, 0, 16, uint16_t, cfg->tc_cfg[4].max_dist); \
++ MC_EXT_OP(ext, 2, 16, 16, uint16_t, cfg->tc_cfg[4].max_fs_entries); \
++ MC_EXT_OP(ext, 2, 32, 16, uint16_t, cfg->tc_cfg[5].max_dist); \
++ MC_EXT_OP(ext, 2, 48, 16, uint16_t, cfg->tc_cfg[5].max_fs_entries); \
++ MC_EXT_OP(ext, 3, 0, 16, uint16_t, cfg->tc_cfg[6].max_dist); \
++ MC_EXT_OP(ext, 3, 16, 16, uint16_t, cfg->tc_cfg[6].max_fs_entries); \
++ MC_EXT_OP(ext, 3, 32, 16, uint16_t, cfg->tc_cfg[7].max_dist); \
++ MC_EXT_OP(ext, 3, 48, 16, uint16_t, cfg->tc_cfg[7].max_fs_entries); \
++ MC_EXT_OP(ext, 4, 0, 16, uint16_t, \
++ cfg->ipr_cfg.max_open_frames_ipv4); \
++ MC_EXT_OP(ext, 4, 16, 16, uint16_t, \
++ cfg->ipr_cfg.max_open_frames_ipv6); \
++ MC_EXT_OP(ext, 4, 32, 16, uint16_t, \
++ cfg->ipr_cfg.max_reass_frm_size); \
++ MC_EXT_OP(ext, 5, 0, 16, uint16_t, \
++ cfg->ipr_cfg.min_frag_size_ipv4); \
++ MC_EXT_OP(ext, 5, 16, 16, uint16_t, \
++ cfg->ipr_cfg.min_frag_size_ipv6); \
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPNI_CMD_CREATE(cmd, cfg) \
++do { \
++ MC_CMD_OP(cmd, 0, 0, 8, uint8_t, cfg->adv.max_tcs); \
++ MC_CMD_OP(cmd, 0, 8, 8, uint8_t, cfg->adv.max_senders); \
++ MC_CMD_OP(cmd, 0, 16, 8, uint8_t, cfg->mac_addr[5]); \
++ MC_CMD_OP(cmd, 0, 24, 8, uint8_t, cfg->mac_addr[4]); \
++ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, cfg->mac_addr[3]); \
++ MC_CMD_OP(cmd, 0, 40, 8, uint8_t, cfg->mac_addr[2]); \
++ MC_CMD_OP(cmd, 0, 48, 8, uint8_t, cfg->mac_addr[1]); \
++ MC_CMD_OP(cmd, 0, 56, 8, uint8_t, cfg->mac_addr[0]); \
++ MC_CMD_OP(cmd, 1, 0, 32, uint32_t, cfg->adv.options); \
++ MC_CMD_OP(cmd, 2, 0, 8, uint8_t, cfg->adv.max_unicast_filters); \
++ MC_CMD_OP(cmd, 2, 8, 8, uint8_t, cfg->adv.max_multicast_filters); \
++ MC_CMD_OP(cmd, 2, 16, 8, uint8_t, cfg->adv.max_vlan_filters); \
++ MC_CMD_OP(cmd, 2, 24, 8, uint8_t, cfg->adv.max_qos_entries); \
++ MC_CMD_OP(cmd, 2, 32, 8, uint8_t, cfg->adv.max_qos_key_size); \
++ MC_CMD_OP(cmd, 2, 48, 8, uint8_t, cfg->adv.max_dist_key_size); \
++ MC_CMD_OP(cmd, 2, 56, 8, enum net_prot, cfg->adv.start_hdr); \
++ MC_CMD_OP(cmd, 4, 48, 8, uint8_t, cfg->adv.max_policers); \
++ MC_CMD_OP(cmd, 4, 56, 8, uint8_t, cfg->adv.max_congestion_ctrl); \
++ MC_CMD_OP(cmd, 5, 0, 64, uint64_t, cfg->adv.ext_cfg_iova); \
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPNI_CMD_SET_POOLS(cmd, cfg) \
++do { \
++ MC_CMD_OP(cmd, 0, 0, 8, uint8_t, cfg->num_dpbp); \
++ MC_CMD_OP(cmd, 0, 8, 1, int, cfg->pools[0].backup_pool); \
++ MC_CMD_OP(cmd, 0, 9, 1, int, cfg->pools[1].backup_pool); \
++ MC_CMD_OP(cmd, 0, 10, 1, int, cfg->pools[2].backup_pool); \
++ MC_CMD_OP(cmd, 0, 11, 1, int, cfg->pools[3].backup_pool); \
++ MC_CMD_OP(cmd, 0, 12, 1, int, cfg->pools[4].backup_pool); \
++ MC_CMD_OP(cmd, 0, 13, 1, int, cfg->pools[5].backup_pool); \
++ MC_CMD_OP(cmd, 0, 14, 1, int, cfg->pools[6].backup_pool); \
++ MC_CMD_OP(cmd, 0, 15, 1, int, cfg->pools[7].backup_pool); \
++ MC_CMD_OP(cmd, 0, 32, 32, int, cfg->pools[0].dpbp_id); \
++ MC_CMD_OP(cmd, 4, 32, 16, uint16_t, cfg->pools[0].buffer_size);\
++ MC_CMD_OP(cmd, 1, 0, 32, int, cfg->pools[1].dpbp_id); \
++ MC_CMD_OP(cmd, 4, 48, 16, uint16_t, cfg->pools[1].buffer_size);\
++ MC_CMD_OP(cmd, 1, 32, 32, int, cfg->pools[2].dpbp_id); \
++ MC_CMD_OP(cmd, 5, 0, 16, uint16_t, cfg->pools[2].buffer_size);\
++ MC_CMD_OP(cmd, 2, 0, 32, int, cfg->pools[3].dpbp_id); \
++ MC_CMD_OP(cmd, 5, 16, 16, uint16_t, cfg->pools[3].buffer_size);\
++ MC_CMD_OP(cmd, 2, 32, 32, int, cfg->pools[4].dpbp_id); \
++ MC_CMD_OP(cmd, 5, 32, 16, uint16_t, cfg->pools[4].buffer_size);\
++ MC_CMD_OP(cmd, 3, 0, 32, int, cfg->pools[5].dpbp_id); \
++ MC_CMD_OP(cmd, 5, 48, 16, uint16_t, cfg->pools[5].buffer_size);\
++ MC_CMD_OP(cmd, 3, 32, 32, int, cfg->pools[6].dpbp_id); \
++ MC_CMD_OP(cmd, 6, 0, 16, uint16_t, cfg->pools[6].buffer_size);\
++ MC_CMD_OP(cmd, 4, 0, 32, int, cfg->pools[7].dpbp_id); \
++ MC_CMD_OP(cmd, 6, 16, 16, uint16_t, cfg->pools[7].buffer_size);\
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPNI_RSP_IS_ENABLED(cmd, en) \
++ MC_RSP_OP(cmd, 0, 0, 1, int, en)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPNI_CMD_SET_IRQ(cmd, irq_index, irq_cfg) \
++do { \
++ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, irq_cfg->val); \
++ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index); \
++ MC_CMD_OP(cmd, 1, 0, 64, uint64_t, irq_cfg->addr); \
++ MC_CMD_OP(cmd, 2, 0, 32, int, irq_cfg->irq_num); \
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPNI_CMD_GET_IRQ(cmd, irq_index) \
++ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPNI_RSP_GET_IRQ(cmd, type, irq_cfg) \
++do { \
++ MC_RSP_OP(cmd, 0, 0, 32, uint32_t, irq_cfg->val); \
++ MC_RSP_OP(cmd, 1, 0, 64, uint64_t, irq_cfg->addr); \
++ MC_RSP_OP(cmd, 2, 0, 32, int, irq_cfg->irq_num); \
++ MC_RSP_OP(cmd, 2, 32, 32, int, type); \
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPNI_CMD_SET_IRQ_ENABLE(cmd, irq_index, en) \
++do { \
++ MC_CMD_OP(cmd, 0, 0, 8, uint8_t, en); \
++ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPNI_CMD_GET_IRQ_ENABLE(cmd, irq_index) \
++ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPNI_RSP_GET_IRQ_ENABLE(cmd, en) \
++ MC_RSP_OP(cmd, 0, 0, 8, uint8_t, en)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPNI_CMD_SET_IRQ_MASK(cmd, irq_index, mask) \
++do { \
++ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, mask); \
++ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPNI_CMD_GET_IRQ_MASK(cmd, irq_index) \
++ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPNI_RSP_GET_IRQ_MASK(cmd, mask) \
++ MC_RSP_OP(cmd, 0, 0, 32, uint32_t, mask)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPNI_CMD_GET_IRQ_STATUS(cmd, irq_index, status) \
++do { \
++ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, status);\
++ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPNI_RSP_GET_IRQ_STATUS(cmd, status) \
++ MC_RSP_OP(cmd, 0, 0, 32, uint32_t, status)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPNI_CMD_CLEAR_IRQ_STATUS(cmd, irq_index, status) \
++do { \
++ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, status); \
++ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPNI_CMD_GET_ATTR(cmd, attr) \
++ MC_CMD_OP(cmd, 6, 0, 64, uint64_t, attr->ext_cfg_iova)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPNI_RSP_GET_ATTR(cmd, attr) \
++do { \
++ MC_RSP_OP(cmd, 0, 0, 32, int, attr->id);\
++ MC_RSP_OP(cmd, 0, 32, 8, uint8_t, attr->max_tcs); \
++ MC_RSP_OP(cmd, 0, 40, 8, uint8_t, attr->max_senders); \
++ MC_RSP_OP(cmd, 0, 48, 8, enum net_prot, attr->start_hdr); \
++ MC_RSP_OP(cmd, 1, 0, 32, uint32_t, attr->options); \
++ MC_RSP_OP(cmd, 2, 0, 8, uint8_t, attr->max_unicast_filters); \
++ MC_RSP_OP(cmd, 2, 8, 8, uint8_t, attr->max_multicast_filters);\
++ MC_RSP_OP(cmd, 2, 16, 8, uint8_t, attr->max_vlan_filters); \
++ MC_RSP_OP(cmd, 2, 24, 8, uint8_t, attr->max_qos_entries); \
++ MC_RSP_OP(cmd, 2, 32, 8, uint8_t, attr->max_qos_key_size); \
++ MC_RSP_OP(cmd, 2, 40, 8, uint8_t, attr->max_dist_key_size); \
++ MC_RSP_OP(cmd, 4, 48, 8, uint8_t, attr->max_policers); \
++ MC_RSP_OP(cmd, 4, 56, 8, uint8_t, attr->max_congestion_ctrl); \
++ MC_RSP_OP(cmd, 5, 32, 16, uint16_t, attr->version.major);\
++ MC_RSP_OP(cmd, 5, 48, 16, uint16_t, attr->version.minor);\
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPNI_CMD_SET_ERRORS_BEHAVIOR(cmd, cfg) \
++do { \
++ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, cfg->errors); \
++ MC_CMD_OP(cmd, 0, 32, 4, enum dpni_error_action, cfg->error_action); \
++ MC_CMD_OP(cmd, 0, 36, 1, int, cfg->set_frame_annotation); \
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPNI_RSP_GET_RX_BUFFER_LAYOUT(cmd, layout) \
++do { \
++ MC_RSP_OP(cmd, 0, 0, 16, uint16_t, layout->private_data_size); \
++ MC_RSP_OP(cmd, 0, 16, 16, uint16_t, layout->data_align); \
++ MC_RSP_OP(cmd, 1, 0, 1, int, layout->pass_timestamp); \
++ MC_RSP_OP(cmd, 1, 1, 1, int, layout->pass_parser_result); \
++ MC_RSP_OP(cmd, 1, 2, 1, int, layout->pass_frame_status); \
++ MC_RSP_OP(cmd, 1, 16, 16, uint16_t, layout->data_head_room); \
++ MC_RSP_OP(cmd, 1, 32, 16, uint16_t, layout->data_tail_room); \
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPNI_CMD_SET_RX_BUFFER_LAYOUT(cmd, layout) \
++do { \
++ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, layout->private_data_size); \
++ MC_CMD_OP(cmd, 0, 16, 16, uint16_t, layout->data_align); \
++ MC_CMD_OP(cmd, 0, 32, 32, uint32_t, layout->options); \
++ MC_CMD_OP(cmd, 1, 0, 1, int, layout->pass_timestamp); \
++ MC_CMD_OP(cmd, 1, 1, 1, int, layout->pass_parser_result); \
++ MC_CMD_OP(cmd, 1, 2, 1, int, layout->pass_frame_status); \
++ MC_CMD_OP(cmd, 1, 16, 16, uint16_t, layout->data_head_room); \
++ MC_CMD_OP(cmd, 1, 32, 16, uint16_t, layout->data_tail_room); \
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPNI_RSP_GET_TX_BUFFER_LAYOUT(cmd, layout) \
++do { \
++ MC_RSP_OP(cmd, 0, 0, 16, uint16_t, layout->private_data_size); \
++ MC_RSP_OP(cmd, 0, 16, 16, uint16_t, layout->data_align); \
++ MC_RSP_OP(cmd, 1, 0, 1, int, layout->pass_timestamp); \
++ MC_RSP_OP(cmd, 1, 1, 1, int, layout->pass_parser_result); \
++ MC_RSP_OP(cmd, 1, 2, 1, int, layout->pass_frame_status); \
++ MC_RSP_OP(cmd, 1, 16, 16, uint16_t, layout->data_head_room); \
++ MC_RSP_OP(cmd, 1, 32, 16, uint16_t, layout->data_tail_room); \
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPNI_CMD_SET_TX_BUFFER_LAYOUT(cmd, layout) \
++do { \
++ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, layout->private_data_size); \
++ MC_CMD_OP(cmd, 0, 16, 16, uint16_t, layout->data_align); \
++ MC_CMD_OP(cmd, 0, 32, 32, uint32_t, layout->options); \
++ MC_CMD_OP(cmd, 1, 0, 1, int, layout->pass_timestamp); \
++ MC_CMD_OP(cmd, 1, 1, 1, int, layout->pass_parser_result); \
++ MC_CMD_OP(cmd, 1, 2, 1, int, layout->pass_frame_status); \
++ MC_CMD_OP(cmd, 1, 16, 16, uint16_t, layout->data_head_room); \
++ MC_CMD_OP(cmd, 1, 32, 16, uint16_t, layout->data_tail_room); \
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPNI_RSP_GET_TX_CONF_BUFFER_LAYOUT(cmd, layout) \
++do { \
++ MC_RSP_OP(cmd, 0, 0, 16, uint16_t, layout->private_data_size); \
++ MC_RSP_OP(cmd, 0, 16, 16, uint16_t, layout->data_align); \
++ MC_RSP_OP(cmd, 1, 0, 1, int, layout->pass_timestamp); \
++ MC_RSP_OP(cmd, 1, 1, 1, int, layout->pass_parser_result); \
++ MC_RSP_OP(cmd, 1, 2, 1, int, layout->pass_frame_status); \
++ MC_RSP_OP(cmd, 1, 16, 16, uint16_t, layout->data_head_room); \
++ MC_RSP_OP(cmd, 1, 32, 16, uint16_t, layout->data_tail_room); \
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPNI_CMD_SET_TX_CONF_BUFFER_LAYOUT(cmd, layout) \
++do { \
++ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, layout->private_data_size); \
++ MC_CMD_OP(cmd, 0, 16, 16, uint16_t, layout->data_align); \
++ MC_CMD_OP(cmd, 0, 32, 32, uint32_t, layout->options); \
++ MC_CMD_OP(cmd, 1, 0, 1, int, layout->pass_timestamp); \
++ MC_CMD_OP(cmd, 1, 1, 1, int, layout->pass_parser_result); \
++ MC_CMD_OP(cmd, 1, 2, 1, int, layout->pass_frame_status); \
++ MC_CMD_OP(cmd, 1, 16, 16, uint16_t, layout->data_head_room); \
++ MC_CMD_OP(cmd, 1, 32, 16, uint16_t, layout->data_tail_room); \
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPNI_CMD_SET_L3_CHKSUM_VALIDATION(cmd, en) \
++ MC_CMD_OP(cmd, 0, 0, 1, int, en)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPNI_RSP_GET_L3_CHKSUM_VALIDATION(cmd, en) \
++ MC_RSP_OP(cmd, 0, 0, 1, int, en)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPNI_CMD_SET_L4_CHKSUM_VALIDATION(cmd, en) \
++ MC_CMD_OP(cmd, 0, 0, 1, int, en)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPNI_RSP_GET_L4_CHKSUM_VALIDATION(cmd, en) \
++ MC_RSP_OP(cmd, 0, 0, 1, int, en)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPNI_RSP_GET_QDID(cmd, qdid) \
++ MC_RSP_OP(cmd, 0, 0, 16, uint16_t, qdid)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPNI_RSP_GET_SP_INFO(cmd, sp_info) \
++do { \
++ MC_RSP_OP(cmd, 0, 0, 16, uint16_t, sp_info->spids[0]); \
++ MC_RSP_OP(cmd, 0, 16, 16, uint16_t, sp_info->spids[1]); \
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPNI_RSP_GET_TX_DATA_OFFSET(cmd, data_offset) \
++ MC_RSP_OP(cmd, 0, 0, 16, uint16_t, data_offset)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPNI_CMD_GET_COUNTER(cmd, counter) \
++ MC_CMD_OP(cmd, 0, 0, 16, enum dpni_counter, counter)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPNI_RSP_GET_COUNTER(cmd, value) \
++ MC_RSP_OP(cmd, 1, 0, 64, uint64_t, value)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPNI_CMD_SET_COUNTER(cmd, counter, value) \
++do { \
++ MC_CMD_OP(cmd, 0, 0, 16, enum dpni_counter, counter); \
++ MC_CMD_OP(cmd, 1, 0, 64, uint64_t, value); \
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPNI_CMD_SET_LINK_CFG(cmd, cfg) \
++do { \
++ MC_CMD_OP(cmd, 1, 0, 32, uint32_t, cfg->rate);\
++ MC_CMD_OP(cmd, 2, 0, 64, uint64_t, cfg->options);\
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPNI_RSP_GET_LINK_STATE(cmd, state) \
++do { \
++ MC_RSP_OP(cmd, 0, 32, 1, int, state->up);\
++ MC_RSP_OP(cmd, 1, 0, 32, uint32_t, state->rate);\
++ MC_RSP_OP(cmd, 2, 0, 64, uint64_t, state->options);\
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPNI_CMD_SET_TX_SHAPING(cmd, tx_shaper) \
++do { \
++ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, tx_shaper->max_burst_size);\
++ MC_CMD_OP(cmd, 1, 0, 32, uint32_t, tx_shaper->rate_limit);\
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPNI_CMD_SET_MAX_FRAME_LENGTH(cmd, max_frame_length) \
++ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, max_frame_length)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPNI_RSP_GET_MAX_FRAME_LENGTH(cmd, max_frame_length) \
++ MC_RSP_OP(cmd, 0, 0, 16, uint16_t, max_frame_length)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPNI_CMD_SET_MTU(cmd, mtu) \
++ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, mtu)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPNI_RSP_GET_MTU(cmd, mtu) \
++ MC_RSP_OP(cmd, 0, 0, 16, uint16_t, mtu)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPNI_CMD_SET_MULTICAST_PROMISC(cmd, en) \
++ MC_CMD_OP(cmd, 0, 0, 1, int, en)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPNI_RSP_GET_MULTICAST_PROMISC(cmd, en) \
++ MC_RSP_OP(cmd, 0, 0, 1, int, en)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPNI_CMD_SET_UNICAST_PROMISC(cmd, en) \
++ MC_CMD_OP(cmd, 0, 0, 1, int, en)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPNI_RSP_GET_UNICAST_PROMISC(cmd, en) \
++ MC_RSP_OP(cmd, 0, 0, 1, int, en)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPNI_CMD_SET_PRIMARY_MAC_ADDR(cmd, mac_addr) \
++do { \
++ MC_CMD_OP(cmd, 0, 16, 8, uint8_t, mac_addr[5]); \
++ MC_CMD_OP(cmd, 0, 24, 8, uint8_t, mac_addr[4]); \
++ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, mac_addr[3]); \
++ MC_CMD_OP(cmd, 0, 40, 8, uint8_t, mac_addr[2]); \
++ MC_CMD_OP(cmd, 0, 48, 8, uint8_t, mac_addr[1]); \
++ MC_CMD_OP(cmd, 0, 56, 8, uint8_t, mac_addr[0]); \
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPNI_RSP_GET_PRIMARY_MAC_ADDR(cmd, mac_addr) \
++do { \
++ MC_RSP_OP(cmd, 0, 16, 8, uint8_t, mac_addr[5]); \
++ MC_RSP_OP(cmd, 0, 24, 8, uint8_t, mac_addr[4]); \
++ MC_RSP_OP(cmd, 0, 32, 8, uint8_t, mac_addr[3]); \
++ MC_RSP_OP(cmd, 0, 40, 8, uint8_t, mac_addr[2]); \
++ MC_RSP_OP(cmd, 0, 48, 8, uint8_t, mac_addr[1]); \
++ MC_RSP_OP(cmd, 0, 56, 8, uint8_t, mac_addr[0]); \
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPNI_CMD_ADD_MAC_ADDR(cmd, mac_addr) \
++do { \
++ MC_CMD_OP(cmd, 0, 16, 8, uint8_t, mac_addr[5]); \
++ MC_CMD_OP(cmd, 0, 24, 8, uint8_t, mac_addr[4]); \
++ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, mac_addr[3]); \
++ MC_CMD_OP(cmd, 0, 40, 8, uint8_t, mac_addr[2]); \
++ MC_CMD_OP(cmd, 0, 48, 8, uint8_t, mac_addr[1]); \
++ MC_CMD_OP(cmd, 0, 56, 8, uint8_t, mac_addr[0]); \
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPNI_CMD_REMOVE_MAC_ADDR(cmd, mac_addr) \
++do { \
++ MC_CMD_OP(cmd, 0, 16, 8, uint8_t, mac_addr[5]); \
++ MC_CMD_OP(cmd, 0, 24, 8, uint8_t, mac_addr[4]); \
++ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, mac_addr[3]); \
++ MC_CMD_OP(cmd, 0, 40, 8, uint8_t, mac_addr[2]); \
++ MC_CMD_OP(cmd, 0, 48, 8, uint8_t, mac_addr[1]); \
++ MC_CMD_OP(cmd, 0, 56, 8, uint8_t, mac_addr[0]); \
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPNI_CMD_CLEAR_MAC_FILTERS(cmd, unicast, multicast) \
++do { \
++ MC_CMD_OP(cmd, 0, 0, 1, int, unicast); \
++ MC_CMD_OP(cmd, 0, 1, 1, int, multicast); \
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPNI_CMD_SET_VLAN_FILTERS(cmd, en) \
++ MC_CMD_OP(cmd, 0, 0, 1, int, en)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPNI_CMD_ADD_VLAN_ID(cmd, vlan_id) \
++ MC_CMD_OP(cmd, 0, 32, 16, uint16_t, vlan_id)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPNI_CMD_REMOVE_VLAN_ID(cmd, vlan_id) \
++ MC_CMD_OP(cmd, 0, 32, 16, uint16_t, vlan_id)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPNI_CMD_SET_TX_SELECTION(cmd, cfg) \
++do { \
++ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, cfg->tc_sched[0].delta_bandwidth);\
++ MC_CMD_OP(cmd, 0, 16, 4, enum dpni_tx_schedule_mode, \
++ cfg->tc_sched[0].mode); \
++ MC_CMD_OP(cmd, 0, 32, 16, uint16_t, cfg->tc_sched[1].delta_bandwidth);\
++ MC_CMD_OP(cmd, 0, 48, 4, enum dpni_tx_schedule_mode, \
++ cfg->tc_sched[1].mode); \
++ MC_CMD_OP(cmd, 1, 0, 16, uint16_t, cfg->tc_sched[2].delta_bandwidth);\
++ MC_CMD_OP(cmd, 1, 16, 4, enum dpni_tx_schedule_mode, \
++ cfg->tc_sched[2].mode); \
++ MC_CMD_OP(cmd, 1, 32, 16, uint16_t, cfg->tc_sched[3].delta_bandwidth);\
++ MC_CMD_OP(cmd, 1, 48, 4, enum dpni_tx_schedule_mode, \
++ cfg->tc_sched[3].mode); \
++ MC_CMD_OP(cmd, 2, 0, 16, uint16_t, cfg->tc_sched[4].delta_bandwidth);\
++ MC_CMD_OP(cmd, 2, 16, 4, enum dpni_tx_schedule_mode, \
++ cfg->tc_sched[4].mode); \
++ MC_CMD_OP(cmd, 2, 32, 16, uint16_t, cfg->tc_sched[5].delta_bandwidth);\
++ MC_CMD_OP(cmd, 2, 48, 4, enum dpni_tx_schedule_mode, \
++ cfg->tc_sched[5].mode); \
++ MC_CMD_OP(cmd, 3, 0, 16, uint16_t, cfg->tc_sched[6].delta_bandwidth);\
++ MC_CMD_OP(cmd, 3, 16, 4, enum dpni_tx_schedule_mode, \
++ cfg->tc_sched[6].mode); \
++ MC_CMD_OP(cmd, 3, 32, 16, uint16_t, cfg->tc_sched[7].delta_bandwidth);\
++ MC_CMD_OP(cmd, 3, 48, 4, enum dpni_tx_schedule_mode, \
++ cfg->tc_sched[7].mode); \
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPNI_CMD_SET_RX_TC_DIST(cmd, tc_id, cfg) \
++do { \
++ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, cfg->dist_size); \
++ MC_CMD_OP(cmd, 0, 16, 8, uint8_t, tc_id); \
++ MC_CMD_OP(cmd, 0, 24, 4, enum dpni_dist_mode, cfg->dist_mode); \
++ MC_CMD_OP(cmd, 0, 28, 4, enum dpni_fs_miss_action, \
++ cfg->fs_cfg.miss_action); \
++ MC_CMD_OP(cmd, 0, 48, 16, uint16_t, cfg->fs_cfg.default_flow_id); \
++ MC_CMD_OP(cmd, 6, 0, 64, uint64_t, cfg->key_cfg_iova); \
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPNI_CMD_SET_TX_FLOW(cmd, flow_id, cfg) \
++do { \
++ MC_CMD_OP(cmd, 0, 43, 1, int, cfg->l3_chksum_gen);\
++ MC_CMD_OP(cmd, 0, 44, 1, int, cfg->l4_chksum_gen);\
++ MC_CMD_OP(cmd, 0, 45, 1, int, cfg->use_common_tx_conf_queue);\
++ MC_CMD_OP(cmd, 0, 48, 16, uint16_t, flow_id);\
++ MC_CMD_OP(cmd, 2, 0, 32, uint32_t, cfg->options);\
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPNI_RSP_SET_TX_FLOW(cmd, flow_id) \
++ MC_RSP_OP(cmd, 0, 48, 16, uint16_t, flow_id)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPNI_CMD_GET_TX_FLOW(cmd, flow_id) \
++ MC_CMD_OP(cmd, 0, 48, 16, uint16_t, flow_id)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPNI_RSP_GET_TX_FLOW(cmd, attr) \
++do { \
++ MC_RSP_OP(cmd, 0, 43, 1, int, attr->l3_chksum_gen);\
++ MC_RSP_OP(cmd, 0, 44, 1, int, attr->l4_chksum_gen);\
++ MC_RSP_OP(cmd, 0, 45, 1, int, attr->use_common_tx_conf_queue);\
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPNI_CMD_SET_RX_FLOW(cmd, tc_id, flow_id, cfg) \
++do { \
++ MC_CMD_OP(cmd, 0, 0, 32, int, cfg->dest_cfg.dest_id); \
++ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, cfg->dest_cfg.priority);\
++ MC_CMD_OP(cmd, 0, 40, 2, enum dpni_dest, cfg->dest_cfg.dest_type);\
++ MC_CMD_OP(cmd, 0, 42, 1, int, cfg->order_preservation_en);\
++ MC_CMD_OP(cmd, 0, 48, 16, uint16_t, flow_id); \
++ MC_CMD_OP(cmd, 1, 0, 64, uint64_t, cfg->user_ctx); \
++ MC_CMD_OP(cmd, 2, 16, 8, uint8_t, tc_id); \
++ MC_CMD_OP(cmd, 2, 32, 32, uint32_t, cfg->options); \
++ MC_CMD_OP(cmd, 3, 0, 4, enum dpni_flc_type, cfg->flc_cfg.flc_type); \
++ MC_CMD_OP(cmd, 3, 4, 4, enum dpni_stash_size, \
++ cfg->flc_cfg.frame_data_size);\
++ MC_CMD_OP(cmd, 3, 8, 4, enum dpni_stash_size, \
++ cfg->flc_cfg.flow_context_size);\
++ MC_CMD_OP(cmd, 3, 32, 32, uint32_t, cfg->flc_cfg.options);\
++ MC_CMD_OP(cmd, 4, 0, 64, uint64_t, cfg->flc_cfg.flow_context);\
++ MC_CMD_OP(cmd, 5, 0, 32, uint32_t, cfg->tail_drop_threshold); \
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPNI_CMD_GET_RX_FLOW(cmd, tc_id, flow_id) \
++do { \
++ MC_CMD_OP(cmd, 0, 16, 8, uint8_t, tc_id); \
++ MC_CMD_OP(cmd, 0, 48, 16, uint16_t, flow_id); \
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPNI_RSP_GET_RX_FLOW(cmd, attr) \
++do { \
++ MC_RSP_OP(cmd, 0, 0, 32, int, attr->dest_cfg.dest_id); \
++ MC_RSP_OP(cmd, 0, 32, 8, uint8_t, attr->dest_cfg.priority);\
++ MC_RSP_OP(cmd, 0, 40, 2, enum dpni_dest, attr->dest_cfg.dest_type); \
++ MC_RSP_OP(cmd, 0, 42, 1, int, attr->order_preservation_en);\
++ MC_RSP_OP(cmd, 1, 0, 64, uint64_t, attr->user_ctx); \
++ MC_RSP_OP(cmd, 2, 0, 32, uint32_t, attr->tail_drop_threshold); \
++ MC_RSP_OP(cmd, 2, 32, 32, uint32_t, attr->fqid); \
++ MC_RSP_OP(cmd, 3, 0, 4, enum dpni_flc_type, attr->flc_cfg.flc_type); \
++ MC_RSP_OP(cmd, 3, 4, 4, enum dpni_stash_size, \
++ attr->flc_cfg.frame_data_size);\
++ MC_RSP_OP(cmd, 3, 8, 4, enum dpni_stash_size, \
++ attr->flc_cfg.flow_context_size);\
++ MC_RSP_OP(cmd, 3, 32, 32, uint32_t, attr->flc_cfg.options);\
++ MC_RSP_OP(cmd, 4, 0, 64, uint64_t, attr->flc_cfg.flow_context);\
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPNI_CMD_SET_RX_ERR_QUEUE(cmd, cfg) \
++do { \
++ MC_CMD_OP(cmd, 0, 0, 32, int, cfg->dest_cfg.dest_id); \
++ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, cfg->dest_cfg.priority);\
++ MC_CMD_OP(cmd, 0, 40, 2, enum dpni_dest, cfg->dest_cfg.dest_type);\
++ MC_CMD_OP(cmd, 0, 42, 1, int, cfg->order_preservation_en);\
++ MC_CMD_OP(cmd, 1, 0, 64, uint64_t, cfg->user_ctx); \
++ MC_CMD_OP(cmd, 2, 0, 32, uint32_t, cfg->options); \
++ MC_CMD_OP(cmd, 2, 32, 32, uint32_t, cfg->tail_drop_threshold); \
++ MC_CMD_OP(cmd, 3, 0, 4, enum dpni_flc_type, cfg->flc_cfg.flc_type); \
++ MC_CMD_OP(cmd, 3, 4, 4, enum dpni_stash_size, \
++ cfg->flc_cfg.frame_data_size);\
++ MC_CMD_OP(cmd, 3, 8, 4, enum dpni_stash_size, \
++ cfg->flc_cfg.flow_context_size);\
++ MC_CMD_OP(cmd, 3, 32, 32, uint32_t, cfg->flc_cfg.options);\
++ MC_CMD_OP(cmd, 4, 0, 64, uint64_t, cfg->flc_cfg.flow_context);\
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPNI_RSP_GET_RX_ERR_QUEUE(cmd, attr) \
++do { \
++ MC_RSP_OP(cmd, 0, 0, 32, int, attr->dest_cfg.dest_id); \
++ MC_RSP_OP(cmd, 0, 32, 8, uint8_t, attr->dest_cfg.priority);\
++ MC_RSP_OP(cmd, 0, 40, 2, enum dpni_dest, attr->dest_cfg.dest_type);\
++ MC_RSP_OP(cmd, 0, 42, 1, int, attr->order_preservation_en);\
++ MC_RSP_OP(cmd, 1, 0, 64, uint64_t, attr->user_ctx); \
++ MC_RSP_OP(cmd, 2, 0, 32, uint32_t, attr->tail_drop_threshold); \
++ MC_RSP_OP(cmd, 2, 32, 32, uint32_t, attr->fqid); \
++ MC_RSP_OP(cmd, 3, 0, 4, enum dpni_flc_type, attr->flc_cfg.flc_type); \
++ MC_RSP_OP(cmd, 3, 4, 4, enum dpni_stash_size, \
++ attr->flc_cfg.frame_data_size);\
++ MC_RSP_OP(cmd, 3, 8, 4, enum dpni_stash_size, \
++ attr->flc_cfg.flow_context_size);\
++ MC_RSP_OP(cmd, 3, 32, 32, uint32_t, attr->flc_cfg.options);\
++ MC_RSP_OP(cmd, 4, 0, 64, uint64_t, attr->flc_cfg.flow_context);\
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPNI_CMD_SET_TX_CONF_REVOKE(cmd, revoke) \
++ MC_CMD_OP(cmd, 0, 0, 1, int, revoke)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPNI_CMD_SET_QOS_TABLE(cmd, cfg) \
++do { \
++ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, cfg->default_tc); \
++ MC_CMD_OP(cmd, 0, 40, 1, int, cfg->discard_on_miss); \
++ MC_CMD_OP(cmd, 6, 0, 64, uint64_t, cfg->key_cfg_iova); \
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPNI_CMD_ADD_QOS_ENTRY(cmd, cfg, tc_id) \
++do { \
++ MC_CMD_OP(cmd, 0, 16, 8, uint8_t, tc_id); \
++ MC_CMD_OP(cmd, 0, 24, 8, uint8_t, cfg->key_size); \
++ MC_CMD_OP(cmd, 1, 0, 64, uint64_t, cfg->key_iova); \
++ MC_CMD_OP(cmd, 2, 0, 64, uint64_t, cfg->mask_iova); \
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPNI_CMD_REMOVE_QOS_ENTRY(cmd, cfg) \
++do { \
++ MC_CMD_OP(cmd, 0, 24, 8, uint8_t, cfg->key_size); \
++ MC_CMD_OP(cmd, 1, 0, 64, uint64_t, cfg->key_iova); \
++ MC_CMD_OP(cmd, 2, 0, 64, uint64_t, cfg->mask_iova); \
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPNI_CMD_ADD_FS_ENTRY(cmd, tc_id, cfg, flow_id) \
++do { \
++ MC_CMD_OP(cmd, 0, 16, 8, uint8_t, tc_id); \
++ MC_CMD_OP(cmd, 0, 48, 16, uint16_t, flow_id); \
++ MC_CMD_OP(cmd, 0, 24, 8, uint8_t, cfg->key_size); \
++ MC_CMD_OP(cmd, 1, 0, 64, uint64_t, cfg->key_iova); \
++ MC_CMD_OP(cmd, 2, 0, 64, uint64_t, cfg->mask_iova); \
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPNI_CMD_REMOVE_FS_ENTRY(cmd, tc_id, cfg) \
++do { \
++ MC_CMD_OP(cmd, 0, 16, 8, uint8_t, tc_id); \
++ MC_CMD_OP(cmd, 0, 24, 8, uint8_t, cfg->key_size); \
++ MC_CMD_OP(cmd, 1, 0, 64, uint64_t, cfg->key_iova); \
++ MC_CMD_OP(cmd, 2, 0, 64, uint64_t, cfg->mask_iova); \
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPNI_CMD_CLEAR_FS_ENTRIES(cmd, tc_id) \
++ MC_CMD_OP(cmd, 0, 16, 8, uint8_t, tc_id)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPNI_CMD_SET_VLAN_INSERTION(cmd, en) \
++ MC_CMD_OP(cmd, 0, 0, 1, int, en)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPNI_CMD_SET_VLAN_REMOVAL(cmd, en) \
++ MC_CMD_OP(cmd, 0, 0, 1, int, en)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPNI_CMD_SET_IPR(cmd, en) \
++ MC_CMD_OP(cmd, 0, 0, 1, int, en)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPNI_CMD_SET_IPF(cmd, en) \
++ MC_CMD_OP(cmd, 0, 0, 1, int, en)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPNI_CMD_SET_RX_TC_POLICING(cmd, tc_id, cfg) \
++do { \
++ MC_CMD_OP(cmd, 0, 0, 4, enum dpni_policer_mode, cfg->mode); \
++ MC_CMD_OP(cmd, 0, 4, 4, enum dpni_policer_color, cfg->default_color); \
++ MC_CMD_OP(cmd, 0, 8, 4, enum dpni_policer_unit, cfg->units); \
++ MC_CMD_OP(cmd, 0, 16, 8, uint8_t, tc_id); \
++ MC_CMD_OP(cmd, 0, 32, 32, uint32_t, cfg->options); \
++ MC_CMD_OP(cmd, 1, 0, 32, uint32_t, cfg->cir); \
++ MC_CMD_OP(cmd, 1, 32, 32, uint32_t, cfg->cbs); \
++ MC_CMD_OP(cmd, 2, 0, 32, uint32_t, cfg->eir); \
++ MC_CMD_OP(cmd, 2, 32, 32, uint32_t, cfg->ebs);\
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPNI_CMD_GET_RX_TC_POLICING(cmd, tc_id) \
++ MC_CMD_OP(cmd, 0, 16, 8, uint8_t, tc_id)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPNI_RSP_GET_RX_TC_POLICING(cmd, cfg) \
++do { \
++ MC_RSP_OP(cmd, 0, 0, 4, enum dpni_policer_mode, cfg->mode); \
++ MC_RSP_OP(cmd, 0, 4, 4, enum dpni_policer_color, cfg->default_color); \
++ MC_RSP_OP(cmd, 0, 8, 4, enum dpni_policer_unit, cfg->units); \
++ MC_RSP_OP(cmd, 0, 32, 32, uint32_t, cfg->options); \
++ MC_RSP_OP(cmd, 1, 0, 32, uint32_t, cfg->cir); \
++ MC_RSP_OP(cmd, 1, 32, 32, uint32_t, cfg->cbs); \
++ MC_RSP_OP(cmd, 2, 0, 32, uint32_t, cfg->eir); \
++ MC_RSP_OP(cmd, 2, 32, 32, uint32_t, cfg->ebs);\
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPNI_PREP_EARLY_DROP(ext, cfg) \
++do { \
++ MC_PREP_OP(ext, 0, 0, 2, enum dpni_early_drop_mode, cfg->mode); \
++ MC_PREP_OP(ext, 0, 2, 2, \
++ enum dpni_congestion_unit, cfg->units); \
++ MC_PREP_OP(ext, 0, 32, 32, uint32_t, cfg->tail_drop_threshold); \
++ MC_PREP_OP(ext, 1, 0, 8, uint8_t, cfg->green.drop_probability); \
++ MC_PREP_OP(ext, 2, 0, 64, uint64_t, cfg->green.max_threshold); \
++ MC_PREP_OP(ext, 3, 0, 64, uint64_t, cfg->green.min_threshold); \
++ MC_PREP_OP(ext, 5, 0, 8, uint8_t, cfg->yellow.drop_probability);\
++ MC_PREP_OP(ext, 6, 0, 64, uint64_t, cfg->yellow.max_threshold); \
++ MC_PREP_OP(ext, 7, 0, 64, uint64_t, cfg->yellow.min_threshold); \
++ MC_PREP_OP(ext, 9, 0, 8, uint8_t, cfg->red.drop_probability); \
++ MC_PREP_OP(ext, 10, 0, 64, uint64_t, cfg->red.max_threshold); \
++ MC_PREP_OP(ext, 11, 0, 64, uint64_t, cfg->red.min_threshold); \
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPNI_EXT_EARLY_DROP(ext, cfg) \
++do { \
++ MC_EXT_OP(ext, 0, 0, 2, enum dpni_early_drop_mode, cfg->mode); \
++ MC_EXT_OP(ext, 0, 2, 2, \
++ enum dpni_congestion_unit, cfg->units); \
++ MC_EXT_OP(ext, 0, 32, 32, uint32_t, cfg->tail_drop_threshold); \
++ MC_EXT_OP(ext, 1, 0, 8, uint8_t, cfg->green.drop_probability); \
++ MC_EXT_OP(ext, 2, 0, 64, uint64_t, cfg->green.max_threshold); \
++ MC_EXT_OP(ext, 3, 0, 64, uint64_t, cfg->green.min_threshold); \
++ MC_EXT_OP(ext, 5, 0, 8, uint8_t, cfg->yellow.drop_probability);\
++ MC_EXT_OP(ext, 6, 0, 64, uint64_t, cfg->yellow.max_threshold); \
++ MC_EXT_OP(ext, 7, 0, 64, uint64_t, cfg->yellow.min_threshold); \
++ MC_EXT_OP(ext, 9, 0, 8, uint8_t, cfg->red.drop_probability); \
++ MC_EXT_OP(ext, 10, 0, 64, uint64_t, cfg->red.max_threshold); \
++ MC_EXT_OP(ext, 11, 0, 64, uint64_t, cfg->red.min_threshold); \
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPNI_CMD_SET_RX_TC_EARLY_DROP(cmd, tc_id, early_drop_iova) \
++do { \
++ MC_CMD_OP(cmd, 0, 8, 8, uint8_t, tc_id); \
++ MC_CMD_OP(cmd, 1, 0, 64, uint64_t, early_drop_iova); \
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPNI_CMD_GET_RX_TC_EARLY_DROP(cmd, tc_id, early_drop_iova) \
++do { \
++ MC_CMD_OP(cmd, 0, 8, 8, uint8_t, tc_id); \
++ MC_CMD_OP(cmd, 1, 0, 64, uint64_t, early_drop_iova); \
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPNI_CMD_SET_TX_TC_EARLY_DROP(cmd, tc_id, early_drop_iova) \
++do { \
++ MC_CMD_OP(cmd, 0, 8, 8, uint8_t, tc_id); \
++ MC_CMD_OP(cmd, 1, 0, 64, uint64_t, early_drop_iova); \
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPNI_CMD_GET_TX_TC_EARLY_DROP(cmd, tc_id, early_drop_iova) \
++do { \
++ MC_CMD_OP(cmd, 0, 8, 8, uint8_t, tc_id); \
++ MC_CMD_OP(cmd, 1, 0, 64, uint64_t, early_drop_iova); \
++} while (0)
++
++#define DPNI_CMD_SET_RX_TC_CONGESTION_NOTIFICATION(cmd, tc_id, cfg) \
++do { \
++ MC_CMD_OP(cmd, 0, 0, 2, enum dpni_congestion_unit, cfg->units); \
++ MC_CMD_OP(cmd, 0, 4, 4, enum dpni_dest, cfg->dest_cfg.dest_type); \
++ MC_CMD_OP(cmd, 0, 8, 8, uint8_t, tc_id); \
++ MC_CMD_OP(cmd, 0, 16, 8, uint8_t, cfg->dest_cfg.priority); \
++ MC_CMD_OP(cmd, 1, 0, 32, uint32_t, cfg->threshold_entry); \
++ MC_CMD_OP(cmd, 1, 32, 32, uint32_t, cfg->threshold_exit); \
++ MC_CMD_OP(cmd, 2, 0, 16, uint16_t, cfg->options); \
++ MC_CMD_OP(cmd, 2, 32, 32, int, cfg->dest_cfg.dest_id); \
++ MC_CMD_OP(cmd, 3, 0, 64, uint64_t, cfg->message_ctx); \
++ MC_CMD_OP(cmd, 4, 0, 64, uint64_t, cfg->message_iova); \
++} while (0)
++
++#define DPNI_CMD_GET_RX_TC_CONGESTION_NOTIFICATION(cmd, tc_id) \
++ MC_CMD_OP(cmd, 0, 8, 8, uint8_t, tc_id)
++
++#define DPNI_RSP_GET_RX_TC_CONGESTION_NOTIFICATION(cmd, cfg) \
++do { \
++ MC_RSP_OP(cmd, 0, 0, 2, enum dpni_congestion_unit, cfg->units); \
++ MC_RSP_OP(cmd, 0, 4, 4, enum dpni_dest, cfg->dest_cfg.dest_type); \
++ MC_RSP_OP(cmd, 0, 16, 8, uint8_t, cfg->dest_cfg.priority); \
++ MC_RSP_OP(cmd, 1, 0, 32, uint32_t, cfg->threshold_entry); \
++ MC_RSP_OP(cmd, 1, 32, 32, uint32_t, cfg->threshold_exit); \
++ MC_RSP_OP(cmd, 2, 0, 16, uint16_t, cfg->options); \
++ MC_RSP_OP(cmd, 2, 32, 32, int, cfg->dest_cfg.dest_id); \
++ MC_RSP_OP(cmd, 3, 0, 64, uint64_t, cfg->message_ctx); \
++ MC_RSP_OP(cmd, 4, 0, 64, uint64_t, cfg->message_iova); \
++} while (0)
++
++#define DPNI_CMD_SET_TX_TC_CONGESTION_NOTIFICATION(cmd, tc_id, cfg) \
++do { \
++ MC_CMD_OP(cmd, 0, 0, 2, enum dpni_congestion_unit, cfg->units); \
++ MC_CMD_OP(cmd, 0, 4, 4, enum dpni_dest, cfg->dest_cfg.dest_type); \
++ MC_CMD_OP(cmd, 0, 8, 8, uint8_t, tc_id); \
++ MC_CMD_OP(cmd, 0, 16, 8, uint8_t, cfg->dest_cfg.priority); \
++ MC_CMD_OP(cmd, 1, 0, 32, uint32_t, cfg->threshold_entry); \
++ MC_CMD_OP(cmd, 1, 32, 32, uint32_t, cfg->threshold_exit); \
++ MC_CMD_OP(cmd, 2, 0, 16, uint16_t, cfg->options); \
++ MC_CMD_OP(cmd, 2, 32, 32, int, cfg->dest_cfg.dest_id); \
++ MC_CMD_OP(cmd, 3, 0, 64, uint64_t, cfg->message_ctx); \
++ MC_CMD_OP(cmd, 4, 0, 64, uint64_t, cfg->message_iova); \
++} while (0)
++
++#define DPNI_CMD_GET_TX_TC_CONGESTION_NOTIFICATION(cmd, tc_id) \
++ MC_CMD_OP(cmd, 0, 8, 8, uint8_t, tc_id)
++
++#define DPNI_RSP_GET_TX_TC_CONGESTION_NOTIFICATION(cmd, cfg) \
++do { \
++ MC_RSP_OP(cmd, 0, 0, 2, enum dpni_congestion_unit, cfg->units); \
++ MC_RSP_OP(cmd, 0, 4, 4, enum dpni_dest, cfg->dest_cfg.dest_type); \
++ MC_RSP_OP(cmd, 0, 16, 8, uint8_t, cfg->dest_cfg.priority); \
++ MC_RSP_OP(cmd, 1, 0, 32, uint32_t, cfg->threshold_entry); \
++ MC_RSP_OP(cmd, 1, 32, 32, uint32_t, cfg->threshold_exit); \
++ MC_RSP_OP(cmd, 2, 0, 16, uint16_t, cfg->options); \
++ MC_RSP_OP(cmd, 2, 32, 32, int, cfg->dest_cfg.dest_id); \
++ MC_RSP_OP(cmd, 3, 0, 64, uint64_t, cfg->message_ctx); \
++ MC_RSP_OP(cmd, 4, 0, 64, uint64_t, cfg->message_iova); \
++} while (0)
++
++#define DPNI_CMD_SET_TX_CONF(cmd, flow_id, cfg) \
++do { \
++ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, cfg->queue_cfg.dest_cfg.priority); \
++ MC_CMD_OP(cmd, 0, 40, 2, enum dpni_dest, \
++ cfg->queue_cfg.dest_cfg.dest_type); \
++ MC_CMD_OP(cmd, 0, 42, 1, int, cfg->errors_only); \
++ MC_CMD_OP(cmd, 0, 46, 1, int, cfg->queue_cfg.order_preservation_en); \
++ MC_CMD_OP(cmd, 0, 48, 16, uint16_t, flow_id); \
++ MC_CMD_OP(cmd, 1, 0, 64, uint64_t, cfg->queue_cfg.user_ctx); \
++ MC_CMD_OP(cmd, 2, 0, 32, uint32_t, cfg->queue_cfg.options); \
++ MC_CMD_OP(cmd, 2, 32, 32, int, cfg->queue_cfg.dest_cfg.dest_id); \
++ MC_CMD_OP(cmd, 3, 0, 32, uint32_t, \
++ cfg->queue_cfg.tail_drop_threshold); \
++ MC_CMD_OP(cmd, 4, 0, 4, enum dpni_flc_type, \
++ cfg->queue_cfg.flc_cfg.flc_type); \
++ MC_CMD_OP(cmd, 4, 4, 4, enum dpni_stash_size, \
++ cfg->queue_cfg.flc_cfg.frame_data_size); \
++ MC_CMD_OP(cmd, 4, 8, 4, enum dpni_stash_size, \
++ cfg->queue_cfg.flc_cfg.flow_context_size); \
++ MC_CMD_OP(cmd, 4, 32, 32, uint32_t, cfg->queue_cfg.flc_cfg.options); \
++ MC_CMD_OP(cmd, 5, 0, 64, uint64_t, \
++ cfg->queue_cfg.flc_cfg.flow_context); \
++} while (0)
++
++#define DPNI_CMD_GET_TX_CONF(cmd, flow_id) \
++ MC_CMD_OP(cmd, 0, 48, 16, uint16_t, flow_id)
++
++#define DPNI_RSP_GET_TX_CONF(cmd, attr) \
++do { \
++ MC_RSP_OP(cmd, 0, 32, 8, uint8_t, \
++ attr->queue_attr.dest_cfg.priority); \
++ MC_RSP_OP(cmd, 0, 40, 2, enum dpni_dest, \
++ attr->queue_attr.dest_cfg.dest_type); \
++ MC_RSP_OP(cmd, 0, 42, 1, int, attr->errors_only); \
++ MC_RSP_OP(cmd, 0, 46, 1, int, \
++ attr->queue_attr.order_preservation_en); \
++ MC_RSP_OP(cmd, 1, 0, 64, uint64_t, attr->queue_attr.user_ctx); \
++ MC_RSP_OP(cmd, 2, 32, 32, int, attr->queue_attr.dest_cfg.dest_id); \
++ MC_RSP_OP(cmd, 3, 0, 32, uint32_t, \
++ attr->queue_attr.tail_drop_threshold); \
++ MC_RSP_OP(cmd, 3, 32, 32, uint32_t, attr->queue_attr.fqid); \
++ MC_RSP_OP(cmd, 4, 0, 4, enum dpni_flc_type, \
++ attr->queue_attr.flc_cfg.flc_type); \
++ MC_RSP_OP(cmd, 4, 4, 4, enum dpni_stash_size, \
++ attr->queue_attr.flc_cfg.frame_data_size); \
++ MC_RSP_OP(cmd, 4, 8, 4, enum dpni_stash_size, \
++ attr->queue_attr.flc_cfg.flow_context_size); \
++ MC_RSP_OP(cmd, 4, 32, 32, uint32_t, attr->queue_attr.flc_cfg.options); \
++ MC_RSP_OP(cmd, 5, 0, 64, uint64_t, \
++ attr->queue_attr.flc_cfg.flow_context); \
++} while (0)
++
++#define DPNI_CMD_SET_TX_CONF_CONGESTION_NOTIFICATION(cmd, flow_id, cfg) \
++do { \
++ MC_CMD_OP(cmd, 0, 0, 2, enum dpni_congestion_unit, cfg->units); \
++ MC_CMD_OP(cmd, 0, 4, 4, enum dpni_dest, cfg->dest_cfg.dest_type); \
++ MC_CMD_OP(cmd, 0, 16, 8, uint8_t, cfg->dest_cfg.priority); \
++ MC_CMD_OP(cmd, 0, 48, 16, uint16_t, flow_id); \
++ MC_CMD_OP(cmd, 1, 0, 32, uint32_t, cfg->threshold_entry); \
++ MC_CMD_OP(cmd, 1, 32, 32, uint32_t, cfg->threshold_exit); \
++ MC_CMD_OP(cmd, 2, 0, 16, uint16_t, cfg->options); \
++ MC_CMD_OP(cmd, 2, 32, 32, int, cfg->dest_cfg.dest_id); \
++ MC_CMD_OP(cmd, 3, 0, 64, uint64_t, cfg->message_ctx); \
++ MC_CMD_OP(cmd, 4, 0, 64, uint64_t, cfg->message_iova); \
++} while (0)
++
++#define DPNI_CMD_GET_TX_CONF_CONGESTION_NOTIFICATION(cmd, flow_id) \
++ MC_CMD_OP(cmd, 0, 48, 16, uint16_t, flow_id)
++
++#define DPNI_RSP_GET_TX_CONF_CONGESTION_NOTIFICATION(cmd, cfg) \
++do { \
++ MC_RSP_OP(cmd, 0, 0, 2, enum dpni_congestion_unit, cfg->units); \
++ MC_RSP_OP(cmd, 0, 4, 4, enum dpni_dest, cfg->dest_cfg.dest_type); \
++ MC_RSP_OP(cmd, 0, 16, 8, uint8_t, cfg->dest_cfg.priority); \
++ MC_RSP_OP(cmd, 1, 0, 32, uint32_t, cfg->threshold_entry); \
++ MC_RSP_OP(cmd, 1, 32, 32, uint32_t, cfg->threshold_exit); \
++ MC_RSP_OP(cmd, 2, 0, 16, uint16_t, cfg->options); \
++ MC_RSP_OP(cmd, 2, 32, 32, int, cfg->dest_cfg.dest_id); \
++ MC_RSP_OP(cmd, 3, 0, 64, uint64_t, cfg->message_ctx); \
++ MC_RSP_OP(cmd, 4, 0, 64, uint64_t, cfg->message_iova); \
++} while (0)
++
++#endif /* _FSL_DPNI_CMD_H */
+diff --git a/drivers/net/dpaa2/mc/fsl_dprc.h b/drivers/net/dpaa2/mc/fsl_dprc.h
+new file mode 100644
+index 0000000..c831f46
+--- /dev/null
++++ b/drivers/net/dpaa2/mc/fsl_dprc.h
+@@ -0,0 +1,1032 @@
++/* Copyright 2013-2015 Freescale Semiconductor Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of the above-listed copyright holders nor the
++ * names of any contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
++ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
++ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
++ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
++ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
++ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
++ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
++ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
++ * POSSIBILITY OF SUCH DAMAGE.
++ */
++#ifndef _FSL_DPRC_H
++#define _FSL_DPRC_H
++
++/* Data Path Resource Container API
++ * Contains DPRC API for managing and querying DPAA resources
++ */
++
++struct fsl_mc_io;
++
++/**
++ * Set this value as the icid value in the dprc_cfg structure when creating a
++ * container, in case the ICID is not selected by the user and should be
++ * allocated by the DPRC from the pool of ICIDs.
++ */
++#define DPRC_GET_ICID_FROM_POOL (uint16_t)(~(0))
++
++/**
++ * Set this value as the portal_id value in the dprc_cfg structure when creating a
++ * container, in case the portal ID is not specifically selected by the
++ * user and should be allocated by the DPRC from the pool of portal ids.
++ */
++#define DPRC_GET_PORTAL_ID_FROM_POOL (int)(~(0))
++
++/**
++ * dprc_get_container_id() - Get container ID associated with a given portal.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @container_id: Requested container ID
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dprc_get_container_id(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ int *container_id);
++
++/**
++ * dprc_open() - Open DPRC object for use
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @container_id: Container ID to open
++ * @token: Returned token of DPRC object
++ *
++ * Return: '0' on Success; Error code otherwise.
++ *
++ * @warning Required before any operation on the object.
++ */
++int dprc_open(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ int container_id,
++ uint16_t *token);
++
++/**
++ * dprc_close() - Close the control session of the object
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPRC object
++ *
++ * After this function is called, no further operations are
++ * allowed on the object without opening a new control session.
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dprc_close(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token);
++
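++/*
++ * Illustrative usage sketch (not part of the MC API itself): a minimal
++ * open/close sequence for the DPRC associated with the current MC portal,
++ * assuming 'mc_io' is an already-initialised MC portal I/O object and that
++ * no special command flags are needed (cmd_flags = 0).
++ *
++ *	int err, container_id;
++ *	uint16_t token;
++ *
++ *	err = dprc_get_container_id(mc_io, 0, &container_id);
++ *	if (!err)
++ *		err = dprc_open(mc_io, 0, container_id, &token);
++ *	if (!err) {
++ *		// ... issue DPRC commands using 'token' ...
++ *		err = dprc_close(mc_io, 0, token);
++ *	}
++ */
++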
++/**
++ * Container general options
++ *
++ * These options may be selected at container creation by the container creator
++ * and can be retrieved using dprc_get_attributes()
++ */
++
++/**
++ * Spawn Policy Option allowed - Indicates that the new container is allowed
++ * to spawn and have its own child containers.
++ */
++#define DPRC_CFG_OPT_SPAWN_ALLOWED 0x00000001
++
++/**
++ * General Container allocation policy - Indicates that the new container is
++ * allowed to allocate requested resources from its parent container; if not
++ * set, the container is only allowed to use resources in its own pools; Note
++ * that this is a container's global policy, but the parent container may
++ * override it and set specific quota per resource type.
++ */
++#define DPRC_CFG_OPT_ALLOC_ALLOWED 0x00000002
++
++/**
++ * Object initialization allowed - software context associated with this
++ * container is allowed to invoke object initialization operations.
++ */
++#define DPRC_CFG_OPT_OBJ_CREATE_ALLOWED 0x00000004
++
++/**
++ * Topology change allowed - software context associated with this
++ * container is allowed to invoke topology operations, such as attach/detach
++ * of network objects.
++ */
++#define DPRC_CFG_OPT_TOPOLOGY_CHANGES_ALLOWED 0x00000008
++
++/**
++ * AIOP - Indicates that container belongs to AIOP.
++ */
++#define DPRC_CFG_OPT_AIOP 0x00000020
++
++/**
++ * IRQ Config - Indicates that the container is allowed to configure its IRQs.
++ */
++#define DPRC_CFG_OPT_IRQ_CFG_ALLOWED 0x00000040
++
++/**
++ * struct dprc_cfg - Container configuration options
++ * @icid: Container's ICID; if set to 'DPRC_GET_ICID_FROM_POOL', a free
++ * ICID value is allocated by the DPRC
++ * @portal_id: Portal ID; if set to 'DPRC_GET_PORTAL_ID_FROM_POOL', a free
++ * portal ID is allocated by the DPRC
++ * @options: Combination of 'DPRC_CFG_OPT_<X>' options
++ * @label: Object's label
++ */
++struct dprc_cfg {
++ uint16_t icid;
++ int portal_id;
++ uint64_t options;
++ char label[16];
++};
++
++/**
++ * dprc_create_container() - Create child container
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPRC object
++ * @cfg: Child container configuration
++ * @child_container_id: Returned child container ID
++ * @child_portal_offset: Returned child portal offset from MC portal base
++ *
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dprc_create_container(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ struct dprc_cfg *cfg,
++ int *child_container_id,
++ uint64_t *child_portal_offset);
++
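++/*
++ * Illustrative sketch (not part of the API): creating a child container with
++ * a pool-allocated ICID and portal ID. 'mc_io' and 'token' are assumed to
++ * come from an earlier dprc_open() on the parent container.
++ *
++ *	struct dprc_cfg cfg = {
++ *		.icid = DPRC_GET_ICID_FROM_POOL,
++ *		.portal_id = DPRC_GET_PORTAL_ID_FROM_POOL,
++ *		.options = DPRC_CFG_OPT_SPAWN_ALLOWED | DPRC_CFG_OPT_ALLOC_ALLOWED,
++ *		.label = "child0",
++ *	};
++ *	int child_id;
++ *	uint64_t portal_offset;
++ *	int err = dprc_create_container(mc_io, 0, token, &cfg,
++ *					&child_id, &portal_offset);
++ */
++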
++/**
++ * dprc_destroy_container() - Destroy child container.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPRC object
++ * @child_container_id: ID of the container to destroy
++ *
++ * This function terminates the child container, so following this call the
++ * child container ID becomes invalid.
++ *
++ * Notes:
++ * - All resources and objects of the destroyed container are returned to the
++ * parent container, or destroyed if they were created by the destroyed
++ * container.
++ * - This function destroys all the child containers of the specified
++ * container prior to destroying the container itself.
++ *
++ * warning: Only the parent container is allowed to destroy a child container;
++ * Container 0 can't be destroyed
++ *
++ * Return: '0' on Success; Error code otherwise.
++ *
++ */
++int dprc_destroy_container(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ int child_container_id);
++
++/**
++ * dprc_reset_container - Reset child container.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPRC object
++ * @child_container_id: ID of the container to reset
++ *
++ * In case a software context crashes or becomes non-responsive, the parent
++ * may wish to reset its resources container before the software context is
++ * restarted.
++ *
++ * This routine informs all objects assigned to the child container that the
++ * container is being reset, so they may perform any cleanup operations that are
++ * needed. All object handles that were owned by the child container shall be
++ * closed.
++ *
++ * Note that such a request may be submitted even if the child software context
++ * has not crashed, but the resulting object cleanup operations will not be
++ * aware of that.
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dprc_reset_container(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ int child_container_id);
++
++/**
++ * DPRC IRQ Index and Events
++ */
++
++/**
++ * IRQ index
++ */
++#define DPRC_IRQ_INDEX 0
++
++/**
++ * Number of dprc's IRQs
++ */
++#define DPRC_NUM_OF_IRQS 1
++
++/* DPRC IRQ events */
++/**
++ * IRQ event - Indicates that a new object was added to the container
++ */
++#define DPRC_IRQ_EVENT_OBJ_ADDED 0x00000001
++/**
++ * IRQ event - Indicates that an object was removed from the container
++ */
++#define DPRC_IRQ_EVENT_OBJ_REMOVED 0x00000002
++/**
++ * IRQ event - Indicates that resources were added to the container
++ */
++#define DPRC_IRQ_EVENT_RES_ADDED 0x00000004
++/**
++ * IRQ event - Indicates that resources were removed from the container
++ */
++#define DPRC_IRQ_EVENT_RES_REMOVED 0x00000008
++/**
++ * IRQ event - Indicates that one of the descendant containers opened by
++ * this container was destroyed
++ */
++#define DPRC_IRQ_EVENT_CONTAINER_DESTROYED 0x00000010
++/**
++ * IRQ event - Indicates that one of the objects opened by this container was
++ * destroyed
++ */
++#define DPRC_IRQ_EVENT_OBJ_DESTROYED 0x00000020
++/**
++ * IRQ event - Indicates that an object was created in the container
++ */
++#define DPRC_IRQ_EVENT_OBJ_CREATED 0x00000040
++
++/**
++ * struct dprc_irq_cfg - IRQ configuration
++ * @addr: Address that must be written to signal a message-based interrupt
++ * @val: Value to write into irq_addr address
++ * @irq_num: A user defined number associated with this IRQ
++ */
++struct dprc_irq_cfg {
++ uint64_t addr;
++ uint32_t val;
++ int irq_num;
++};
++
++/**
++ * dprc_set_irq() - Set IRQ information for the DPRC to trigger an interrupt.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPRC object
++ * @irq_index: Identifies the interrupt index to configure
++ * @irq_cfg: IRQ configuration
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dprc_set_irq(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ struct dprc_irq_cfg *irq_cfg);
++
++/**
++ * dprc_get_irq() - Get IRQ information from the DPRC.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPRC object
++ * @irq_index: The interrupt index to configure
++ * @type: Interrupt type: 0 represents message interrupt
++ * type (both irq_addr and irq_val are valid)
++ * @irq_cfg: IRQ attributes
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dprc_get_irq(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ int *type,
++ struct dprc_irq_cfg *irq_cfg);
++
++/**
++ * dprc_set_irq_enable() - Set overall interrupt state.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPRC object
++ * @irq_index: The interrupt index to configure
++ * @en: Interrupt state - enable = 1, disable = 0
++ *
++ * Allows GPP software to control when interrupts are generated.
++ * Each interrupt can have up to 32 causes. The enable/disable controls the
++ * overall interrupt state. If the interrupt is disabled, no causes will
++ * trigger an interrupt.
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dprc_set_irq_enable(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ uint8_t en);
++
++/**
++ * dprc_get_irq_enable() - Get overall interrupt state.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPRC object
++ * @irq_index: The interrupt index to configure
++ * @en: Returned interrupt state - enable = 1, disable = 0
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dprc_get_irq_enable(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ uint8_t *en);
++
++/**
++ * dprc_set_irq_mask() - Set interrupt mask.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPRC object
++ * @irq_index: The interrupt index to configure
++ * @mask: event mask to trigger interrupt;
++ * each bit:
++ * 0 = ignore event
++ * 1 = consider event for asserting IRQ
++ *
++ * Every interrupt can have up to 32 causes and the interrupt model supports
++ * masking/unmasking each cause independently
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dprc_set_irq_mask(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ uint32_t mask);
++
++/**
++ * dprc_get_irq_mask() - Get interrupt mask.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPRC object
++ * @irq_index: The interrupt index to configure
++ * @mask: Returned event mask to trigger interrupt
++ *
++ * Every interrupt can have up to 32 causes and the interrupt model supports
++ * masking/unmasking each cause independently
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dprc_get_irq_mask(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ uint32_t *mask);
++
++/**
++ * dprc_get_irq_status() - Get the current status of any pending interrupts.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPRC object
++ * @irq_index: The interrupt index to configure
++ * @status: Returned interrupts status - one bit per cause:
++ * 0 = no interrupt pending
++ * 1 = interrupt pending
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dprc_get_irq_status(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ uint32_t *status);
++
++/**
++ * dprc_clear_irq_status() - Clear a pending interrupt's status
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPRC object
++ * @irq_index: The interrupt index to configure
++ * @status: bits to clear (W1C) - one bit per cause:
++ * 0 = don't change
++ * 1 = clear status bit
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dprc_clear_irq_status(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ uint32_t status);
++
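++/*
++ * Illustrative sketch (not part of the API): a typical flow for arming the
++ * DPRC interrupt - program the message-based IRQ, select which events may
++ * assert it, then enable it. The address/value pair written into
++ * dprc_irq_cfg is platform specific; 'msi_addr' and 'msi_data' are shown
++ * here only as placeholders.
++ *
++ *	struct dprc_irq_cfg irq_cfg = {
++ *		.addr = msi_addr,	// hypothetical MSI doorbell address
++ *		.val = msi_data,	// hypothetical MSI data value
++ *		.irq_num = 0,
++ *	};
++ *	int err = dprc_set_irq(mc_io, 0, token, DPRC_IRQ_INDEX, &irq_cfg);
++ *	if (!err)
++ *		err = dprc_set_irq_mask(mc_io, 0, token, DPRC_IRQ_INDEX,
++ *					DPRC_IRQ_EVENT_OBJ_ADDED |
++ *					DPRC_IRQ_EVENT_OBJ_REMOVED);
++ *	if (!err)
++ *		err = dprc_set_irq_enable(mc_io, 0, token, DPRC_IRQ_INDEX, 1);
++ */
++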
++/**
++ * struct dprc_attributes - Container attributes
++ * @container_id: Container's ID
++ * @icid: Container's ICID
++ * @portal_id: Container's portal ID
++ * @options: Container's options as set at container's creation
++ * @version: DPRC version
++ */
++struct dprc_attributes {
++ int container_id;
++ uint16_t icid;
++ int portal_id;
++ uint64_t options;
++ /**
++ * struct version - DPRC version
++ * @major: DPRC major version
++ * @minor: DPRC minor version
++ */
++ struct {
++ uint16_t major;
++ uint16_t minor;
++ } version;
++};
++
++/**
++ * dprc_get_attributes() - Obtains container attributes
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPRC object
++ * @attributes: Returned container attributes
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dprc_get_attributes(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ struct dprc_attributes *attributes);
++
++/**
++ * dprc_set_res_quota() - Set allocation policy for a specific resource/object
++ * type in a child container
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPRC object
++ * @child_container_id: ID of the child container
++ * @type: Resource/object type
++ * @quota: Sets the maximum number of resources of the selected type
++ * that the child container is allowed to allocate from its parent;
++ * when quota is set to -1, the policy is the same as the container's
++ * general policy.
++ *
++ * Allocation policy determines whether or not a container may allocate
++ * resources from its parent. Each container has a 'global' allocation policy
++ * that is set when the container is created.
++ *
++ * This function sets allocation policy for a specific resource type.
++ * The default policy for all resource types matches the container's 'global'
++ * allocation policy.
++ *
++ * Return: '0' on Success; Error code otherwise.
++ *
++ * @warning Only the parent container is allowed to change a child policy.
++ */
++int dprc_set_res_quota(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ int child_container_id,
++ char *type,
++ uint16_t quota);
++
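++/*
++ * Illustrative sketch (not part of the API): limiting a child container to a
++ * per-type quota. The "dpio" type string and quota value are examples only;
++ * a quota of -1 (0xffff) would restore the container's general policy.
++ *
++ *	int err = dprc_set_res_quota(mc_io, 0, token, child_id, "dpio", 4);
++ */
++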
++/**
++ * dprc_get_res_quota() - Gets the allocation policy of a specific
++ * resource/object type in a child container
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPRC object
++ * @child_container_id: ID of the child container
++ * @type: resource/object type
++ * @quota: Returns the maximum number of resources of the selected type
++ * that the child container is allowed to allocate from the parent;
++ * when quota is set to -1, the policy is the same as the container's
++ * general policy.
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dprc_get_res_quota(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ int child_container_id,
++ char *type,
++ uint16_t *quota);
++
++/* Resource request options */
++
++/**
++ * Explicit resource ID request - The requested objects/resources
++ * are explicit and sequential (in case of resources).
++ * The base ID is given in the id_base_align field of res_req
++ */
++#define DPRC_RES_REQ_OPT_EXPLICIT 0x00000001
++
++/**
++ * Aligned resources request - Relevant only for resource
++ * requests (and not objects). Indicates that the resources' base ID should be
++ * sequential and aligned to the value given in the dprc_res_req id_base_align
++ * field
++ */
++#define DPRC_RES_REQ_OPT_ALIGNED 0x00000002
++
++/**
++ * Plugged Flag - Relevant only for object assignment requests.
++ * Indicates that, after all objects are assigned, an interrupt will be invoked
++ * at the relevant GPP. The assigned objects will be marked as plugged;
++ * plugged objects can't be assigned from their container
++ */
++#define DPRC_RES_REQ_OPT_PLUGGED 0x00000004
++
++/**
++ * struct dprc_res_req - Resource request descriptor, to be used in assignment
++ * or un-assignment of resources and objects.
++ * @type: Resource/object type: Represented as a NULL terminated string.
++ * This string may be obtained by using dprc_get_pool() to get the resource
++ * type and dprc_get_obj() to get the object type;
++ * Note: it is not possible to assign/un-assign DPRC objects
++ * @num: Number of resources
++ * @options: Request options: combination of DPRC_RES_REQ_OPT_ options
++ * @id_base_align: In case of explicit assignment (DPRC_RES_REQ_OPT_EXPLICIT
++ * is set at option), this field represents the required base ID
++ * for resource allocation; In case of aligned assignment
++ * (DPRC_RES_REQ_OPT_ALIGNED is set at option), this field
++ * indicates the required alignment for the resource ID(s) -
++ * use 0 if there is no alignment or explicit ID requirements
++ */
++struct dprc_res_req {
++ char type[16];
++ uint32_t num;
++ uint32_t options;
++ int id_base_align;
++};
++
++/**
++ * dprc_assign() - Assigns objects or resource to a child container.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPRC object
++ * @container_id: ID of the child container
++ * @res_req: Describes the type and amount of resources to
++ * assign to the given container
++ *
++ * Assignment is usually done by a parent (this DPRC) to one of its child
++ * containers.
++ *
++ * According to the DPRC allocation policy, the assigned resources may be taken
++ * (allocated) from the container's ancestors, if not enough resources are
++ * available in the container itself.
++ *
++ * The type of assignment depends on the dprc_res_req options, as follows:
++ * - DPRC_RES_REQ_OPT_EXPLICIT: indicates that assigned resources should have
++ * the explicit base ID specified at the id_base_align field of res_req.
++ * - DPRC_RES_REQ_OPT_ALIGNED: indicates that the assigned resources should be
++ * aligned to the value given at id_base_align field of res_req.
++ * - DPRC_RES_REQ_OPT_PLUGGED: Relevant only for object assignment,
++ * and indicates that the object must be set to the plugged state.
++ *
++ * A container may use this function with its own ID in order to change an
++ * object's state to plugged or unplugged.
++ *
++ * If IRQ information has been set in the child DPRC, it will signal an
++ * interrupt following every change in its object assignment.
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dprc_assign(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ int container_id,
++ struct dprc_res_req *res_req);
++
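++/*
++ * Illustrative sketch (not part of the API): assigning objects to a child
++ * container in the plugged state. The "dpbp" type string and the child
++ * container ID are examples only.
++ *
++ *	struct dprc_res_req req = {
++ *		.type = "dpbp",
++ *		.num = 2,
++ *		.options = DPRC_RES_REQ_OPT_PLUGGED,
++ *		.id_base_align = 0,
++ *	};
++ *	int err = dprc_assign(mc_io, 0, token, child_id, &req);
++ */
++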
++/**
++ * dprc_unassign() - Un-assigns objects or resources from a child container
++ * and moves them into this (parent) DPRC.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPRC object
++ * @child_container_id: ID of the child container
++ * @res_req: Describes the type and amount of resources to un-assign from
++ * the child container
++ *
++ * Un-assignment of objects can succeed only if the object is not in the
++ * plugged or opened state.
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dprc_unassign(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ int child_container_id,
++ struct dprc_res_req *res_req);
++
++/**
++ * dprc_get_pool_count() - Get the number of dprc's pools
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPRC object
++ * @pool_count: Returned number of resource pools in the dprc
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dprc_get_pool_count(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ int *pool_count);
++
++/**
++ * dprc_get_pool() - Get the type (string) of a certain dprc's pool
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPRC object
++ * @pool_index: Index of the pool to be queried (< pool_count)
++ * @type: The type of the pool
++ *
++ * The pool types are retrieved one by one by incrementing
++ * pool_index up to (not including) the value of pool_count returned
++ * from dprc_get_pool_count(). dprc_get_pool_count() must
++ * be called prior to dprc_get_pool().
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dprc_get_pool(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ int pool_index,
++ char *type);
++
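++/*
++ * Illustrative sketch (not part of the API): enumerating the resource pools
++ * of a container, per the iteration rule described above. The 16-byte type
++ * buffer size is an assumption matching the other type strings in this file.
++ *
++ *	int i, pool_count;
++ *	char type[16];
++ *
++ *	int err = dprc_get_pool_count(mc_io, 0, token, &pool_count);
++ *	for (i = 0; !err && i < pool_count; i++) {
++ *		err = dprc_get_pool(mc_io, 0, token, i, type);
++ *		// ... inspect 'type' ...
++ *	}
++ */
++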
++/**
++ * dprc_get_obj_count() - Obtains the number of objects in the DPRC
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPRC object
++ * @obj_count: Number of objects assigned to the DPRC
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dprc_get_obj_count(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ int *obj_count);
++
++/**
++ * Objects Attributes Flags
++ */
++
++/**
++ * Opened state - Indicates that an object is open by at least one owner
++ */
++#define DPRC_OBJ_STATE_OPEN 0x00000001
++/**
++ * Plugged state - Indicates that the object is plugged
++ */
++#define DPRC_OBJ_STATE_PLUGGED 0x00000002
++
++/**
++ * Shareability flag - Object flag indicating no memory shareability.
++ * The object generates memory accesses that are non-coherent with other
++ * masters;
++ * the user is responsible for proper memory handling through IOMMU
++ * configuration.
++ */
++#define DPRC_OBJ_FLAG_NO_MEM_SHAREABILITY 0x0001
++
++/**
++ * struct dprc_obj_desc - Object descriptor, returned from dprc_get_obj()
++ * @type: Type of object: NULL terminated string
++ * @id: ID of logical object resource
++ * @vendor: Object vendor identifier
++ * @ver_major: Major version number
++ * @ver_minor: Minor version number
++ * @irq_count: Number of interrupts supported by the object
++ * @region_count: Number of mappable regions supported by the object
++ * @state: Object state: combination of DPRC_OBJ_STATE_ states
++ * @label: Object label
++ * @flags: Object's flags
++ */
++struct dprc_obj_desc {
++ char type[16];
++ int id;
++ uint16_t vendor;
++ uint16_t ver_major;
++ uint16_t ver_minor;
++ uint8_t irq_count;
++ uint8_t region_count;
++ uint32_t state;
++ char label[16];
++ uint16_t flags;
++};
++
++/**
++ * dprc_get_obj() - Get general information on an object
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPRC object
++ * @obj_index: Index of the object to be queried (< obj_count)
++ * @obj_desc: Returns the requested object descriptor
++ *
++ * The object descriptors are retrieved one by one by incrementing
++ * obj_index up to (not including) the value of obj_count returned
++ * from dprc_get_obj_count(). dprc_get_obj_count() must
++ * be called prior to dprc_get_obj().
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dprc_get_obj(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ int obj_index,
++ struct dprc_obj_desc *obj_desc);
++
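++/*
++ * Illustrative sketch (not part of the API): walking all objects in the
++ * container, following the obj_count/obj_index iteration described above.
++ *
++ *	int i, obj_count;
++ *	struct dprc_obj_desc desc;
++ *
++ *	int err = dprc_get_obj_count(mc_io, 0, token, &obj_count);
++ *	for (i = 0; !err && i < obj_count; i++) {
++ *		err = dprc_get_obj(mc_io, 0, token, i, &desc);
++ *		// ... e.g. match desc.type against "dpni", "dpbp", ...
++ *	}
++ */
++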
++/**
++ * dprc_get_obj_desc() - Get object descriptor.
++ *
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPRC object
++ * @obj_type: The type of the object to get its descriptor.
++ * @obj_id: The id of the object to get its descriptor
++ * @obj_desc: The returned descriptor to fill and return to the user
++ *
++ * Return: '0' on Success; Error code otherwise.
++ *
++ */
++int dprc_get_obj_desc(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ char *obj_type,
++ int obj_id,
++ struct dprc_obj_desc *obj_desc);
++
++/**
++ * dprc_set_obj_irq() - Set IRQ information for object to trigger an interrupt.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPRC object
++ * @obj_type: Type of the object to set its IRQ
++ * @obj_id: ID of the object to set its IRQ
++ * @irq_index: The interrupt index to configure
++ * @irq_cfg: IRQ configuration
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dprc_set_obj_irq(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ char *obj_type,
++ int obj_id,
++ uint8_t irq_index,
++ struct dprc_irq_cfg *irq_cfg);
++
++/**
++ * dprc_get_obj_irq() - Get IRQ information from object.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPRC object
++ * @obj_type: Type of the object to get its IRQ
++ * @obj_id: ID of the object to get its IRQ
++ * @irq_index: The interrupt index to configure
++ * @type: Interrupt type: 0 represents message interrupt
++ * type (both irq_addr and irq_val are valid)
++ * @irq_cfg: The returned IRQ attributes
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dprc_get_obj_irq(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ char *obj_type,
++ int obj_id,
++ uint8_t irq_index,
++ int *type,
++ struct dprc_irq_cfg *irq_cfg);
++
++/**
++ * dprc_get_res_count() - Obtains the number of free resources that are
++ * assigned to this container, by pool type
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPRC object
++ * @type: pool type
++ * @res_count: Returned number of free resources of the given
++ * resource type that are assigned to this DPRC
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dprc_get_res_count(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ char *type,
++ int *res_count);
++
++/**
++ * enum dprc_iter_status - Iteration status
++ * @DPRC_ITER_STATUS_FIRST: Perform first iteration
++ * @DPRC_ITER_STATUS_MORE: Indicates more/next iteration is needed
++ * @DPRC_ITER_STATUS_LAST: Indicates last iteration
++ */
++enum dprc_iter_status {
++ DPRC_ITER_STATUS_FIRST = 0,
++ DPRC_ITER_STATUS_MORE = 1,
++ DPRC_ITER_STATUS_LAST = 2
++};
++
++/**
++ * struct dprc_res_ids_range_desc - Resource ID range descriptor
++ * @base_id: Base resource ID of this range
++ * @last_id: Last resource ID of this range
++ * @iter_status: Iteration status - should be set to DPRC_ITER_STATUS_FIRST at
++ * first iteration; while the returned marker is DPRC_ITER_STATUS_MORE,
++ * additional iterations are needed, until the returned marker is
++ * DPRC_ITER_STATUS_LAST
++ */
++struct dprc_res_ids_range_desc {
++ int base_id;
++ int last_id;
++ enum dprc_iter_status iter_status;
++};
++
++/**
++ * dprc_get_res_ids() - Obtains IDs of free resources in the container
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPRC object
++ * @type: pool type
++ * @range_desc: range descriptor
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dprc_get_res_ids(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ char *type,
++ struct dprc_res_ids_range_desc *range_desc);
++
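++/*
++ * Illustrative sketch (not part of the API): collecting the free resource ID
++ * ranges of a pool, driven by the iter_status protocol described above. The
++ * "dpbp" pool type is an example only.
++ *
++ *	struct dprc_res_ids_range_desc range;
++ *	int err;
++ *
++ *	range.iter_status = DPRC_ITER_STATUS_FIRST;
++ *	do {
++ *		err = dprc_get_res_ids(mc_io, 0, token, "dpbp", &range);
++ *		// ... use range.base_id .. range.last_id ...
++ *	} while (!err && range.iter_status != DPRC_ITER_STATUS_LAST);
++ */
++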
++/**
++ * Region flags
++ */
++/**
++ * Cacheable - Indicates that region should be mapped as cacheable
++ */
++#define DPRC_REGION_CACHEABLE 0x00000001
++
++/**
++ * enum dprc_region_type - Region type
++ * @DPRC_REGION_TYPE_MC_PORTAL: MC portal region
++ * @DPRC_REGION_TYPE_QBMAN_PORTAL: Qbman portal region
++ */
++enum dprc_region_type {
++ DPRC_REGION_TYPE_MC_PORTAL,
++ DPRC_REGION_TYPE_QBMAN_PORTAL
++};
++
++/**
++ * struct dprc_region_desc - Mappable region descriptor
++ * @base_offset: Region offset from region's base address.
++ * For DPMCP and DPRC objects, region base is offset from SoC MC portals
++ * base address; For DPIO, region base is offset from SoC QMan portals
++ * base address
++ * @size: Region size (in bytes)
++ * @flags: Region attributes
++ * @type: Portal region type
++ */
++struct dprc_region_desc {
++ uint32_t base_offset;
++ uint32_t size;
++ uint32_t flags;
++ enum dprc_region_type type;
++};
++
++/**
++ * dprc_get_obj_region() - Get region information for a specified object.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPRC object
++ * @obj_type: Object type as returned in dprc_get_obj()
++ * @obj_id: Unique object instance as returned in dprc_get_obj()
++ * @region_index: The specific region to query
++ * @region_desc: Returns the requested region descriptor
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dprc_get_obj_region(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ char *obj_type,
++ int obj_id,
++ uint8_t region_index,
++ struct dprc_region_desc *region_desc);
++
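++/*
++ * Illustrative sketch (not part of the API): querying region 0 of a DPIO
++ * object in order to locate its mappable portal window; the object type/ID
++ * would normally come from a previous dprc_get_obj() call.
++ *
++ *	struct dprc_region_desc region;
++ *	int err = dprc_get_obj_region(mc_io, 0, token, "dpio", 0, 0, &region);
++ *	// region.base_offset / region.size describe the mappable window
++ */
++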
++/**
++ * dprc_set_obj_label() - Set object label.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPRC object
++ * @obj_type: Object's type
++ * @obj_id: Object's ID
++ * @label: The required label. The maximum length is 16 chars.
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dprc_set_obj_label(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ char *obj_type,
++ int obj_id,
++ char *label);
++
++/**
++ * struct dprc_endpoint - Endpoint description for link connect/disconnect
++ * operations
++ * @type: Endpoint object type: NULL terminated string
++ * @id: Endpoint object ID
++ * @if_id: Interface ID; should be set for endpoints with multiple
++ * interfaces ("dpsw", "dpdmux"); for others, always set to 0
++ */
++struct dprc_endpoint {
++ char type[16];
++ int id;
++ uint16_t if_id;
++};
++
++/**
++ * struct dprc_connection_cfg - Connection configuration.
++ * Used for virtual connections only
++ * @committed_rate: Committed rate (Mbits/s)
++ * @max_rate: Maximum rate (Mbits/s)
++ */
++struct dprc_connection_cfg {
++ uint32_t committed_rate;
++ uint32_t max_rate;
++};
++
++/**
++ * dprc_connect() - Connect two endpoints to create a network link between them
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPRC object
++ * @endpoint1: Endpoint 1 configuration parameters
++ * @endpoint2: Endpoint 2 configuration parameters
++ * @cfg: Connection configuration. The connection configuration is ignored for
++ * connections made to DPMAC objects, where rate is retrieved from the
++ * MAC configuration.
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dprc_connect(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ const struct dprc_endpoint *endpoint1,
++ const struct dprc_endpoint *endpoint2,
++ const struct dprc_connection_cfg *cfg);
++
++/**
++ * dprc_disconnect() - Disconnect one endpoint to remove its network connection
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPRC object
++ * @endpoint: Endpoint configuration parameters
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dprc_disconnect(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ const struct dprc_endpoint *endpoint);
++
++/**
++* dprc_get_connection() - Get connected endpoint and link status if connection
++* exists.
++* @mc_io: Pointer to MC portal's I/O object
++* @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++* @token: Token of DPRC object
++* @endpoint1: Endpoint 1 configuration parameters
++* @endpoint2: Returned endpoint 2 configuration parameters
++* @state: Returned link state:
++* 1 - link is up;
++* 0 - link is down;
++* -1 - no connection (endpoint2 information is irrelevant)
++*
++* Return: '0' on Success; -ENAVAIL if connection does not exist.
++*/
++int dprc_get_connection(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ const struct dprc_endpoint *endpoint1,
++ struct dprc_endpoint *endpoint2,
++ int *state);
++
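++/*
++ * Illustrative sketch (not part of the API): connecting a DPNI to a DPMAC
++ * and checking the resulting link state. Object IDs are examples only; the
++ * connection cfg is ignored for DPMAC endpoints, as noted above.
++ *
++ *	struct dprc_endpoint ep1 = { .type = "dpni",  .id = 0, .if_id = 0 };
++ *	struct dprc_endpoint ep2 = { .type = "dpmac", .id = 1, .if_id = 0 };
++ *	struct dprc_connection_cfg cfg = { 0 };
++ *	struct dprc_endpoint peer;
++ *	int state;
++ *
++ *	int err = dprc_connect(mc_io, 0, token, &ep1, &ep2, &cfg);
++ *	if (!err)
++ *		err = dprc_get_connection(mc_io, 0, token, &ep1, &peer, &state);
++ */
++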
++#endif /* _FSL_DPRC_H */
+diff --git a/drivers/net/dpaa2/mc/fsl_dprc_cmd.h b/drivers/net/dpaa2/mc/fsl_dprc_cmd.h
+new file mode 100644
+index 0000000..469e286
+--- /dev/null
++++ b/drivers/net/dpaa2/mc/fsl_dprc_cmd.h
+@@ -0,0 +1,755 @@
++/* Copyright 2013-2015 Freescale Semiconductor Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of the above-listed copyright holders nor the
++ * names of any contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
++ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
++ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
++ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
++ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
++ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
++ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
++ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
++ * POSSIBILITY OF SUCH DAMAGE.
++ */
++#ifndef _FSL_DPRC_CMD_H
++#define _FSL_DPRC_CMD_H
++
++/* DPRC Version */
++#define DPRC_VER_MAJOR 5
++#define DPRC_VER_MINOR 1
++
++/* Command IDs */
++#define DPRC_CMDID_CLOSE 0x800
++#define DPRC_CMDID_OPEN 0x805
++#define DPRC_CMDID_CREATE 0x905
++
++#define DPRC_CMDID_GET_ATTR 0x004
++#define DPRC_CMDID_RESET_CONT 0x005
++
++#define DPRC_CMDID_SET_IRQ 0x010
++#define DPRC_CMDID_GET_IRQ 0x011
++#define DPRC_CMDID_SET_IRQ_ENABLE 0x012
++#define DPRC_CMDID_GET_IRQ_ENABLE 0x013
++#define DPRC_CMDID_SET_IRQ_MASK 0x014
++#define DPRC_CMDID_GET_IRQ_MASK 0x015
++#define DPRC_CMDID_GET_IRQ_STATUS 0x016
++#define DPRC_CMDID_CLEAR_IRQ_STATUS 0x017
++
++#define DPRC_CMDID_CREATE_CONT 0x151
++#define DPRC_CMDID_DESTROY_CONT 0x152
++#define DPRC_CMDID_GET_CONT_ID 0x830
++#define DPRC_CMDID_SET_RES_QUOTA 0x155
++#define DPRC_CMDID_GET_RES_QUOTA 0x156
++#define DPRC_CMDID_ASSIGN 0x157
++#define DPRC_CMDID_UNASSIGN 0x158
++#define DPRC_CMDID_GET_OBJ_COUNT 0x159
++#define DPRC_CMDID_GET_OBJ 0x15A
++#define DPRC_CMDID_GET_RES_COUNT 0x15B
++#define DPRC_CMDID_GET_RES_IDS 0x15C
++#define DPRC_CMDID_GET_OBJ_REG 0x15E
++#define DPRC_CMDID_SET_OBJ_IRQ 0x15F
++#define DPRC_CMDID_GET_OBJ_IRQ 0x160
++#define DPRC_CMDID_SET_OBJ_LABEL 0x161
++#define DPRC_CMDID_GET_OBJ_DESC 0x162
++
++#define DPRC_CMDID_CONNECT 0x167
++#define DPRC_CMDID_DISCONNECT 0x168
++#define DPRC_CMDID_GET_POOL 0x169
++#define DPRC_CMDID_GET_POOL_COUNT 0x16A
++
++#define DPRC_CMDID_GET_CONNECTION 0x16C
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPRC_RSP_GET_CONTAINER_ID(cmd, container_id) \
++ MC_RSP_OP(cmd, 0, 0, 32, int, container_id)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPRC_CMD_OPEN(cmd, container_id) \
++ MC_CMD_OP(cmd, 0, 0, 32, int, container_id)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPRC_CMD_CREATE_CONTAINER(cmd, cfg) \
++do { \
++ MC_CMD_OP(cmd, 0, 32, 16, uint16_t, cfg->icid); \
++ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, cfg->options); \
++ MC_CMD_OP(cmd, 1, 32, 32, int, cfg->portal_id); \
++ MC_CMD_OP(cmd, 2, 0, 8, char, cfg->label[0]);\
++ MC_CMD_OP(cmd, 2, 8, 8, char, cfg->label[1]);\
++ MC_CMD_OP(cmd, 2, 16, 8, char, cfg->label[2]);\
++ MC_CMD_OP(cmd, 2, 24, 8, char, cfg->label[3]);\
++ MC_CMD_OP(cmd, 2, 32, 8, char, cfg->label[4]);\
++ MC_CMD_OP(cmd, 2, 40, 8, char, cfg->label[5]);\
++ MC_CMD_OP(cmd, 2, 48, 8, char, cfg->label[6]);\
++ MC_CMD_OP(cmd, 2, 56, 8, char, cfg->label[7]);\
++ MC_CMD_OP(cmd, 3, 0, 8, char, cfg->label[8]);\
++ MC_CMD_OP(cmd, 3, 8, 8, char, cfg->label[9]);\
++ MC_CMD_OP(cmd, 3, 16, 8, char, cfg->label[10]);\
++ MC_CMD_OP(cmd, 3, 24, 8, char, cfg->label[11]);\
++ MC_CMD_OP(cmd, 3, 32, 8, char, cfg->label[12]);\
++ MC_CMD_OP(cmd, 3, 40, 8, char, cfg->label[13]);\
++ MC_CMD_OP(cmd, 3, 48, 8, char, cfg->label[14]);\
++ MC_CMD_OP(cmd, 3, 56, 8, char, cfg->label[15]);\
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPRC_RSP_CREATE_CONTAINER(cmd, child_container_id, child_portal_offset)\
++do { \
++ MC_RSP_OP(cmd, 1, 0, 32, int, child_container_id); \
++ MC_RSP_OP(cmd, 2, 0, 64, uint64_t, child_portal_offset);\
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPRC_CMD_DESTROY_CONTAINER(cmd, child_container_id) \
++ MC_CMD_OP(cmd, 0, 0, 32, int, child_container_id)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPRC_CMD_RESET_CONTAINER(cmd, child_container_id) \
++ MC_CMD_OP(cmd, 0, 0, 32, int, child_container_id)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPRC_CMD_SET_IRQ(cmd, irq_index, irq_cfg) \
++do { \
++ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index); \
++ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, irq_cfg->val); \
++ MC_CMD_OP(cmd, 1, 0, 64, uint64_t, irq_cfg->addr);\
++ MC_CMD_OP(cmd, 2, 0, 32, int, irq_cfg->irq_num); \
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPRC_CMD_GET_IRQ(cmd, irq_index) \
++ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPRC_RSP_GET_IRQ(cmd, type, irq_cfg) \
++do { \
++ MC_RSP_OP(cmd, 0, 0, 32, uint32_t, irq_cfg->val); \
++ MC_RSP_OP(cmd, 1, 0, 64, uint64_t, irq_cfg->addr);\
++ MC_RSP_OP(cmd, 2, 0, 32, int, irq_cfg->irq_num); \
++ MC_RSP_OP(cmd, 2, 32, 32, int, type); \
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPRC_CMD_SET_IRQ_ENABLE(cmd, irq_index, en) \
++do { \
++ MC_CMD_OP(cmd, 0, 0, 8, uint8_t, en); \
++ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPRC_CMD_GET_IRQ_ENABLE(cmd, irq_index) \
++ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPRC_RSP_GET_IRQ_ENABLE(cmd, en) \
++ MC_RSP_OP(cmd, 0, 0, 8, uint8_t, en)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPRC_CMD_SET_IRQ_MASK(cmd, irq_index, mask) \
++do { \
++ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, mask); \
++ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPRC_CMD_GET_IRQ_MASK(cmd, irq_index) \
++ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPRC_RSP_GET_IRQ_MASK(cmd, mask) \
++ MC_RSP_OP(cmd, 0, 0, 32, uint32_t, mask)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPRC_CMD_GET_IRQ_STATUS(cmd, irq_index, status) \
++do { \
++ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, status);\
++ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPRC_RSP_GET_IRQ_STATUS(cmd, status) \
++ MC_RSP_OP(cmd, 0, 0, 32, uint32_t, status)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPRC_CMD_CLEAR_IRQ_STATUS(cmd, irq_index, status) \
++do { \
++ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, status); \
++ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPRC_RSP_GET_ATTRIBUTES(cmd, attr) \
++do { \
++ MC_RSP_OP(cmd, 0, 0, 32, int, attr->container_id); \
++ MC_RSP_OP(cmd, 0, 32, 16, uint16_t, attr->icid); \
++ MC_RSP_OP(cmd, 1, 0, 32, uint32_t, attr->options);\
++ MC_RSP_OP(cmd, 1, 32, 32, int, attr->portal_id); \
++ MC_RSP_OP(cmd, 2, 0, 16, uint16_t, attr->version.major);\
++ MC_RSP_OP(cmd, 2, 16, 16, uint16_t, attr->version.minor);\
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPRC_CMD_SET_RES_QUOTA(cmd, child_container_id, type, quota) \
++do { \
++ MC_CMD_OP(cmd, 0, 0, 32, int, child_container_id); \
++ MC_CMD_OP(cmd, 0, 32, 16, uint16_t, quota);\
++ MC_CMD_OP(cmd, 1, 0, 8, char, type[0]);\
++ MC_CMD_OP(cmd, 1, 8, 8, char, type[1]);\
++ MC_CMD_OP(cmd, 1, 16, 8, char, type[2]);\
++ MC_CMD_OP(cmd, 1, 24, 8, char, type[3]);\
++ MC_CMD_OP(cmd, 1, 32, 8, char, type[4]);\
++ MC_CMD_OP(cmd, 1, 40, 8, char, type[5]);\
++ MC_CMD_OP(cmd, 1, 48, 8, char, type[6]);\
++ MC_CMD_OP(cmd, 1, 56, 8, char, type[7]);\
++ MC_CMD_OP(cmd, 2, 0, 8, char, type[8]);\
++ MC_CMD_OP(cmd, 2, 8, 8, char, type[9]);\
++ MC_CMD_OP(cmd, 2, 16, 8, char, type[10]);\
++ MC_CMD_OP(cmd, 2, 24, 8, char, type[11]);\
++ MC_CMD_OP(cmd, 2, 32, 8, char, type[12]);\
++ MC_CMD_OP(cmd, 2, 40, 8, char, type[13]);\
++ MC_CMD_OP(cmd, 2, 48, 8, char, type[14]);\
++ MC_CMD_OP(cmd, 2, 56, 8, char, type[15]);\
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPRC_CMD_GET_RES_QUOTA(cmd, child_container_id, type) \
++do { \
++ MC_CMD_OP(cmd, 0, 0, 32, int, child_container_id); \
++ MC_CMD_OP(cmd, 1, 0, 8, char, type[0]);\
++ MC_CMD_OP(cmd, 1, 8, 8, char, type[1]);\
++ MC_CMD_OP(cmd, 1, 16, 8, char, type[2]);\
++ MC_CMD_OP(cmd, 1, 24, 8, char, type[3]);\
++ MC_CMD_OP(cmd, 1, 32, 8, char, type[4]);\
++ MC_CMD_OP(cmd, 1, 40, 8, char, type[5]);\
++ MC_CMD_OP(cmd, 1, 48, 8, char, type[6]);\
++ MC_CMD_OP(cmd, 1, 56, 8, char, type[7]);\
++ MC_CMD_OP(cmd, 2, 0, 8, char, type[8]);\
++ MC_CMD_OP(cmd, 2, 8, 8, char, type[9]);\
++ MC_CMD_OP(cmd, 2, 16, 8, char, type[10]);\
++ MC_CMD_OP(cmd, 2, 24, 8, char, type[11]);\
++ MC_CMD_OP(cmd, 2, 32, 8, char, type[12]);\
++ MC_CMD_OP(cmd, 2, 40, 8, char, type[13]);\
++ MC_CMD_OP(cmd, 2, 48, 8, char, type[14]);\
++ MC_CMD_OP(cmd, 2, 56, 8, char, type[15]);\
++} while (0)
++/* cmd, param, offset, width, type, arg_name */
++#define DPRC_RSP_GET_RES_QUOTA(cmd, quota) \
++ MC_RSP_OP(cmd, 0, 32, 16, uint16_t, quota)
++
++/* param, offset, width, type, arg_name */
++#define DPRC_CMD_ASSIGN(cmd, container_id, res_req) \
++do { \
++ MC_CMD_OP(cmd, 0, 0, 32, int, container_id); \
++ MC_CMD_OP(cmd, 0, 32, 32, uint32_t, res_req->options);\
++ MC_CMD_OP(cmd, 1, 0, 32, uint32_t, res_req->num); \
++ MC_CMD_OP(cmd, 1, 32, 32, int, res_req->id_base_align); \
++ MC_CMD_OP(cmd, 2, 0, 8, char, res_req->type[0]);\
++ MC_CMD_OP(cmd, 2, 8, 8, char, res_req->type[1]);\
++ MC_CMD_OP(cmd, 2, 16, 8, char, res_req->type[2]);\
++ MC_CMD_OP(cmd, 2, 24, 8, char, res_req->type[3]);\
++ MC_CMD_OP(cmd, 2, 32, 8, char, res_req->type[4]);\
++ MC_CMD_OP(cmd, 2, 40, 8, char, res_req->type[5]);\
++ MC_CMD_OP(cmd, 2, 48, 8, char, res_req->type[6]);\
++ MC_CMD_OP(cmd, 2, 56, 8, char, res_req->type[7]);\
++ MC_CMD_OP(cmd, 3, 0, 8, char, res_req->type[8]);\
++ MC_CMD_OP(cmd, 3, 8, 8, char, res_req->type[9]);\
++ MC_CMD_OP(cmd, 3, 16, 8, char, res_req->type[10]);\
++ MC_CMD_OP(cmd, 3, 24, 8, char, res_req->type[11]);\
++ MC_CMD_OP(cmd, 3, 32, 8, char, res_req->type[12]);\
++ MC_CMD_OP(cmd, 3, 40, 8, char, res_req->type[13]);\
++ MC_CMD_OP(cmd, 3, 48, 8, char, res_req->type[14]);\
++ MC_CMD_OP(cmd, 3, 56, 8, char, res_req->type[15]);\
++} while (0)
++
++/* param, offset, width, type, arg_name */
++#define DPRC_CMD_UNASSIGN(cmd, child_container_id, res_req) \
++do { \
++ MC_CMD_OP(cmd, 0, 0, 32, int, child_container_id); \
++ MC_CMD_OP(cmd, 0, 32, 32, uint32_t, res_req->options);\
++ MC_CMD_OP(cmd, 1, 0, 32, uint32_t, res_req->num); \
++ MC_CMD_OP(cmd, 1, 32, 32, int, res_req->id_base_align); \
++ MC_CMD_OP(cmd, 2, 0, 8, char, res_req->type[0]);\
++ MC_CMD_OP(cmd, 2, 8, 8, char, res_req->type[1]);\
++ MC_CMD_OP(cmd, 2, 16, 8, char, res_req->type[2]);\
++ MC_CMD_OP(cmd, 2, 24, 8, char, res_req->type[3]);\
++ MC_CMD_OP(cmd, 2, 32, 8, char, res_req->type[4]);\
++ MC_CMD_OP(cmd, 2, 40, 8, char, res_req->type[5]);\
++ MC_CMD_OP(cmd, 2, 48, 8, char, res_req->type[6]);\
++ MC_CMD_OP(cmd, 2, 56, 8, char, res_req->type[7]);\
++ MC_CMD_OP(cmd, 3, 0, 8, char, res_req->type[8]);\
++ MC_CMD_OP(cmd, 3, 8, 8, char, res_req->type[9]);\
++ MC_CMD_OP(cmd, 3, 16, 8, char, res_req->type[10]);\
++ MC_CMD_OP(cmd, 3, 24, 8, char, res_req->type[11]);\
++ MC_CMD_OP(cmd, 3, 32, 8, char, res_req->type[12]);\
++ MC_CMD_OP(cmd, 3, 40, 8, char, res_req->type[13]);\
++ MC_CMD_OP(cmd, 3, 48, 8, char, res_req->type[14]);\
++ MC_CMD_OP(cmd, 3, 56, 8, char, res_req->type[15]);\
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPRC_RSP_GET_POOL_COUNT(cmd, pool_count) \
++ MC_RSP_OP(cmd, 0, 0, 32, int, pool_count)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPRC_CMD_GET_POOL(cmd, pool_index) \
++ MC_CMD_OP(cmd, 0, 0, 32, int, pool_index)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPRC_RSP_GET_POOL(cmd, type) \
++do { \
++ MC_RSP_OP(cmd, 1, 0, 8, char, type[0]);\
++ MC_RSP_OP(cmd, 1, 8, 8, char, type[1]);\
++ MC_RSP_OP(cmd, 1, 16, 8, char, type[2]);\
++ MC_RSP_OP(cmd, 1, 24, 8, char, type[3]);\
++ MC_RSP_OP(cmd, 1, 32, 8, char, type[4]);\
++ MC_RSP_OP(cmd, 1, 40, 8, char, type[5]);\
++ MC_RSP_OP(cmd, 1, 48, 8, char, type[6]);\
++ MC_RSP_OP(cmd, 1, 56, 8, char, type[7]);\
++ MC_RSP_OP(cmd, 2, 0, 8, char, type[8]);\
++ MC_RSP_OP(cmd, 2, 8, 8, char, type[9]);\
++ MC_RSP_OP(cmd, 2, 16, 8, char, type[10]);\
++ MC_RSP_OP(cmd, 2, 24, 8, char, type[11]);\
++ MC_RSP_OP(cmd, 2, 32, 8, char, type[12]);\
++ MC_RSP_OP(cmd, 2, 40, 8, char, type[13]);\
++ MC_RSP_OP(cmd, 2, 48, 8, char, type[14]);\
++ MC_RSP_OP(cmd, 2, 56, 8, char, type[15]);\
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPRC_RSP_GET_OBJ_COUNT(cmd, obj_count) \
++ MC_RSP_OP(cmd, 0, 32, 32, int, obj_count)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPRC_CMD_GET_OBJ(cmd, obj_index) \
++ MC_CMD_OP(cmd, 0, 0, 32, int, obj_index)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPRC_RSP_GET_OBJ(cmd, obj_desc) \
++do { \
++ MC_RSP_OP(cmd, 0, 32, 32, int, obj_desc->id); \
++ MC_RSP_OP(cmd, 1, 0, 16, uint16_t, obj_desc->vendor); \
++ MC_RSP_OP(cmd, 1, 16, 8, uint8_t, obj_desc->irq_count); \
++ MC_RSP_OP(cmd, 1, 24, 8, uint8_t, obj_desc->region_count); \
++ MC_RSP_OP(cmd, 1, 32, 32, uint32_t, obj_desc->state);\
++ MC_RSP_OP(cmd, 2, 0, 16, uint16_t, obj_desc->ver_major);\
++ MC_RSP_OP(cmd, 2, 16, 16, uint16_t, obj_desc->ver_minor);\
++ MC_RSP_OP(cmd, 2, 32, 16, uint16_t, obj_desc->flags); \
++ MC_RSP_OP(cmd, 3, 0, 8, char, obj_desc->type[0]);\
++ MC_RSP_OP(cmd, 3, 8, 8, char, obj_desc->type[1]);\
++ MC_RSP_OP(cmd, 3, 16, 8, char, obj_desc->type[2]);\
++ MC_RSP_OP(cmd, 3, 24, 8, char, obj_desc->type[3]);\
++ MC_RSP_OP(cmd, 3, 32, 8, char, obj_desc->type[4]);\
++ MC_RSP_OP(cmd, 3, 40, 8, char, obj_desc->type[5]);\
++ MC_RSP_OP(cmd, 3, 48, 8, char, obj_desc->type[6]);\
++ MC_RSP_OP(cmd, 3, 56, 8, char, obj_desc->type[7]);\
++ MC_RSP_OP(cmd, 4, 0, 8, char, obj_desc->type[8]);\
++ MC_RSP_OP(cmd, 4, 8, 8, char, obj_desc->type[9]);\
++ MC_RSP_OP(cmd, 4, 16, 8, char, obj_desc->type[10]);\
++ MC_RSP_OP(cmd, 4, 24, 8, char, obj_desc->type[11]);\
++ MC_RSP_OP(cmd, 4, 32, 8, char, obj_desc->type[12]);\
++ MC_RSP_OP(cmd, 4, 40, 8, char, obj_desc->type[13]);\
++ MC_RSP_OP(cmd, 4, 48, 8, char, obj_desc->type[14]);\
++ MC_RSP_OP(cmd, 4, 56, 8, char, obj_desc->type[15]);\
++ MC_RSP_OP(cmd, 5, 0, 8, char, obj_desc->label[0]);\
++ MC_RSP_OP(cmd, 5, 8, 8, char, obj_desc->label[1]);\
++ MC_RSP_OP(cmd, 5, 16, 8, char, obj_desc->label[2]);\
++ MC_RSP_OP(cmd, 5, 24, 8, char, obj_desc->label[3]);\
++ MC_RSP_OP(cmd, 5, 32, 8, char, obj_desc->label[4]);\
++ MC_RSP_OP(cmd, 5, 40, 8, char, obj_desc->label[5]);\
++ MC_RSP_OP(cmd, 5, 48, 8, char, obj_desc->label[6]);\
++ MC_RSP_OP(cmd, 5, 56, 8, char, obj_desc->label[7]);\
++ MC_RSP_OP(cmd, 6, 0, 8, char, obj_desc->label[8]);\
++ MC_RSP_OP(cmd, 6, 8, 8, char, obj_desc->label[9]);\
++ MC_RSP_OP(cmd, 6, 16, 8, char, obj_desc->label[10]);\
++ MC_RSP_OP(cmd, 6, 24, 8, char, obj_desc->label[11]);\
++ MC_RSP_OP(cmd, 6, 32, 8, char, obj_desc->label[12]);\
++ MC_RSP_OP(cmd, 6, 40, 8, char, obj_desc->label[13]);\
++ MC_RSP_OP(cmd, 6, 48, 8, char, obj_desc->label[14]);\
++ MC_RSP_OP(cmd, 6, 56, 8, char, obj_desc->label[15]);\
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPRC_CMD_GET_OBJ_DESC(cmd, obj_type, obj_id) \
++do { \
++ MC_CMD_OP(cmd, 0, 0, 32, int, obj_id);\
++ MC_CMD_OP(cmd, 1, 0, 8, char, obj_type[0]);\
++ MC_CMD_OP(cmd, 1, 8, 8, char, obj_type[1]);\
++ MC_CMD_OP(cmd, 1, 16, 8, char, obj_type[2]);\
++ MC_CMD_OP(cmd, 1, 24, 8, char, obj_type[3]);\
++ MC_CMD_OP(cmd, 1, 32, 8, char, obj_type[4]);\
++ MC_CMD_OP(cmd, 1, 40, 8, char, obj_type[5]);\
++ MC_CMD_OP(cmd, 1, 48, 8, char, obj_type[6]);\
++ MC_CMD_OP(cmd, 1, 56, 8, char, obj_type[7]);\
++ MC_CMD_OP(cmd, 2, 0, 8, char, obj_type[8]);\
++ MC_CMD_OP(cmd, 2, 8, 8, char, obj_type[9]);\
++ MC_CMD_OP(cmd, 2, 16, 8, char, obj_type[10]);\
++ MC_CMD_OP(cmd, 2, 24, 8, char, obj_type[11]);\
++ MC_CMD_OP(cmd, 2, 32, 8, char, obj_type[12]);\
++ MC_CMD_OP(cmd, 2, 40, 8, char, obj_type[13]);\
++ MC_CMD_OP(cmd, 2, 48, 8, char, obj_type[14]);\
++ MC_CMD_OP(cmd, 2, 56, 8, char, obj_type[15]);\
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPRC_RSP_GET_OBJ_DESC(cmd, obj_desc) \
++do { \
++ MC_RSP_OP(cmd, 0, 32, 32, int, obj_desc->id); \
++ MC_RSP_OP(cmd, 1, 0, 16, uint16_t, obj_desc->vendor); \
++ MC_RSP_OP(cmd, 1, 16, 8, uint8_t, obj_desc->irq_count); \
++ MC_RSP_OP(cmd, 1, 24, 8, uint8_t, obj_desc->region_count); \
++ MC_RSP_OP(cmd, 1, 32, 32, uint32_t, obj_desc->state);\
++ MC_RSP_OP(cmd, 2, 0, 16, uint16_t, obj_desc->ver_major);\
++ MC_RSP_OP(cmd, 2, 16, 16, uint16_t, obj_desc->ver_minor);\
++ MC_RSP_OP(cmd, 2, 32, 16, uint16_t, obj_desc->flags); \
++ MC_RSP_OP(cmd, 3, 0, 8, char, obj_desc->type[0]);\
++ MC_RSP_OP(cmd, 3, 8, 8, char, obj_desc->type[1]);\
++ MC_RSP_OP(cmd, 3, 16, 8, char, obj_desc->type[2]);\
++ MC_RSP_OP(cmd, 3, 24, 8, char, obj_desc->type[3]);\
++ MC_RSP_OP(cmd, 3, 32, 8, char, obj_desc->type[4]);\
++ MC_RSP_OP(cmd, 3, 40, 8, char, obj_desc->type[5]);\
++ MC_RSP_OP(cmd, 3, 48, 8, char, obj_desc->type[6]);\
++ MC_RSP_OP(cmd, 3, 56, 8, char, obj_desc->type[7]);\
++ MC_RSP_OP(cmd, 4, 0, 8, char, obj_desc->type[8]);\
++ MC_RSP_OP(cmd, 4, 8, 8, char, obj_desc->type[9]);\
++ MC_RSP_OP(cmd, 4, 16, 8, char, obj_desc->type[10]);\
++ MC_RSP_OP(cmd, 4, 24, 8, char, obj_desc->type[11]);\
++ MC_RSP_OP(cmd, 4, 32, 8, char, obj_desc->type[12]);\
++ MC_RSP_OP(cmd, 4, 40, 8, char, obj_desc->type[13]);\
++ MC_RSP_OP(cmd, 4, 48, 8, char, obj_desc->type[14]);\
++ MC_RSP_OP(cmd, 4, 56, 8, char, obj_desc->type[15]);\
++ MC_RSP_OP(cmd, 5, 0, 8, char, obj_desc->label[0]);\
++ MC_RSP_OP(cmd, 5, 8, 8, char, obj_desc->label[1]);\
++ MC_RSP_OP(cmd, 5, 16, 8, char, obj_desc->label[2]);\
++ MC_RSP_OP(cmd, 5, 24, 8, char, obj_desc->label[3]);\
++ MC_RSP_OP(cmd, 5, 32, 8, char, obj_desc->label[4]);\
++ MC_RSP_OP(cmd, 5, 40, 8, char, obj_desc->label[5]);\
++ MC_RSP_OP(cmd, 5, 48, 8, char, obj_desc->label[6]);\
++ MC_RSP_OP(cmd, 5, 56, 8, char, obj_desc->label[7]);\
++ MC_RSP_OP(cmd, 6, 0, 8, char, obj_desc->label[8]);\
++ MC_RSP_OP(cmd, 6, 8, 8, char, obj_desc->label[9]);\
++ MC_RSP_OP(cmd, 6, 16, 8, char, obj_desc->label[10]);\
++ MC_RSP_OP(cmd, 6, 24, 8, char, obj_desc->label[11]);\
++ MC_RSP_OP(cmd, 6, 32, 8, char, obj_desc->label[12]);\
++ MC_RSP_OP(cmd, 6, 40, 8, char, obj_desc->label[13]);\
++ MC_RSP_OP(cmd, 6, 48, 8, char, obj_desc->label[14]);\
++ MC_RSP_OP(cmd, 6, 56, 8, char, obj_desc->label[15]);\
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPRC_CMD_GET_RES_COUNT(cmd, type) \
++do { \
++ MC_CMD_OP(cmd, 1, 0, 8, char, type[0]);\
++ MC_CMD_OP(cmd, 1, 8, 8, char, type[1]);\
++ MC_CMD_OP(cmd, 1, 16, 8, char, type[2]);\
++ MC_CMD_OP(cmd, 1, 24, 8, char, type[3]);\
++ MC_CMD_OP(cmd, 1, 32, 8, char, type[4]);\
++ MC_CMD_OP(cmd, 1, 40, 8, char, type[5]);\
++ MC_CMD_OP(cmd, 1, 48, 8, char, type[6]);\
++ MC_CMD_OP(cmd, 1, 56, 8, char, type[7]);\
++ MC_CMD_OP(cmd, 2, 0, 8, char, type[8]);\
++ MC_CMD_OP(cmd, 2, 8, 8, char, type[9]);\
++ MC_CMD_OP(cmd, 2, 16, 8, char, type[10]);\
++ MC_CMD_OP(cmd, 2, 24, 8, char, type[11]);\
++ MC_CMD_OP(cmd, 2, 32, 8, char, type[12]);\
++ MC_CMD_OP(cmd, 2, 40, 8, char, type[13]);\
++ MC_CMD_OP(cmd, 2, 48, 8, char, type[14]);\
++ MC_CMD_OP(cmd, 2, 56, 8, char, type[15]);\
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPRC_RSP_GET_RES_COUNT(cmd, res_count) \
++ MC_RSP_OP(cmd, 0, 0, 32, int, res_count)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPRC_CMD_GET_RES_IDS(cmd, range_desc, type) \
++do { \
++ MC_CMD_OP(cmd, 0, 42, 7, enum dprc_iter_status, \
++ range_desc->iter_status); \
++ MC_CMD_OP(cmd, 1, 0, 32, int, range_desc->base_id); \
++ MC_CMD_OP(cmd, 1, 32, 32, int, range_desc->last_id);\
++ MC_CMD_OP(cmd, 2, 0, 8, char, type[0]);\
++ MC_CMD_OP(cmd, 2, 8, 8, char, type[1]);\
++ MC_CMD_OP(cmd, 2, 16, 8, char, type[2]);\
++ MC_CMD_OP(cmd, 2, 24, 8, char, type[3]);\
++ MC_CMD_OP(cmd, 2, 32, 8, char, type[4]);\
++ MC_CMD_OP(cmd, 2, 40, 8, char, type[5]);\
++ MC_CMD_OP(cmd, 2, 48, 8, char, type[6]);\
++ MC_CMD_OP(cmd, 2, 56, 8, char, type[7]);\
++ MC_CMD_OP(cmd, 3, 0, 8, char, type[8]);\
++ MC_CMD_OP(cmd, 3, 8, 8, char, type[9]);\
++ MC_CMD_OP(cmd, 3, 16, 8, char, type[10]);\
++ MC_CMD_OP(cmd, 3, 24, 8, char, type[11]);\
++ MC_CMD_OP(cmd, 3, 32, 8, char, type[12]);\
++ MC_CMD_OP(cmd, 3, 40, 8, char, type[13]);\
++ MC_CMD_OP(cmd, 3, 48, 8, char, type[14]);\
++ MC_CMD_OP(cmd, 3, 56, 8, char, type[15]);\
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPRC_RSP_GET_RES_IDS(cmd, range_desc) \
++do { \
++ MC_RSP_OP(cmd, 0, 42, 7, enum dprc_iter_status, \
++ range_desc->iter_status);\
++ MC_RSP_OP(cmd, 1, 0, 32, int, range_desc->base_id); \
++ MC_RSP_OP(cmd, 1, 32, 32, int, range_desc->last_id);\
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPRC_CMD_GET_OBJ_REGION(cmd, obj_type, obj_id, region_index) \
++do { \
++ MC_CMD_OP(cmd, 0, 0, 32, int, obj_id); \
++ MC_CMD_OP(cmd, 0, 48, 8, uint8_t, region_index);\
++ MC_CMD_OP(cmd, 3, 0, 8, char, obj_type[0]);\
++ MC_CMD_OP(cmd, 3, 8, 8, char, obj_type[1]);\
++ MC_CMD_OP(cmd, 3, 16, 8, char, obj_type[2]);\
++ MC_CMD_OP(cmd, 3, 24, 8, char, obj_type[3]);\
++ MC_CMD_OP(cmd, 3, 32, 8, char, obj_type[4]);\
++ MC_CMD_OP(cmd, 3, 40, 8, char, obj_type[5]);\
++ MC_CMD_OP(cmd, 3, 48, 8, char, obj_type[6]);\
++ MC_CMD_OP(cmd, 3, 56, 8, char, obj_type[7]);\
++ MC_CMD_OP(cmd, 4, 0, 8, char, obj_type[8]);\
++ MC_CMD_OP(cmd, 4, 8, 8, char, obj_type[9]);\
++ MC_CMD_OP(cmd, 4, 16, 8, char, obj_type[10]);\
++ MC_CMD_OP(cmd, 4, 24, 8, char, obj_type[11]);\
++ MC_CMD_OP(cmd, 4, 32, 8, char, obj_type[12]);\
++ MC_CMD_OP(cmd, 4, 40, 8, char, obj_type[13]);\
++ MC_CMD_OP(cmd, 4, 48, 8, char, obj_type[14]);\
++ MC_CMD_OP(cmd, 4, 56, 8, char, obj_type[15]);\
++} while (0)
++
++/* param, offset, width, type, arg_name */
++#define DPRC_RSP_GET_OBJ_REGION(cmd, region_desc) \
++do { \
++ MC_RSP_OP(cmd, 1, 0, 32, uint32_t, region_desc->base_offset);\
++ MC_RSP_OP(cmd, 2, 0, 32, uint32_t, region_desc->size); \
++ MC_RSP_OP(cmd, 2, 32, 4, enum dprc_region_type, region_desc->type);\
++ MC_RSP_OP(cmd, 3, 0, 32, uint32_t, region_desc->flags);\
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPRC_CMD_SET_OBJ_LABEL(cmd, obj_type, obj_id, label) \
++do { \
++ MC_CMD_OP(cmd, 0, 0, 32, int, obj_id); \
++ MC_CMD_OP(cmd, 1, 0, 8, char, label[0]);\
++ MC_CMD_OP(cmd, 1, 8, 8, char, label[1]);\
++ MC_CMD_OP(cmd, 1, 16, 8, char, label[2]);\
++ MC_CMD_OP(cmd, 1, 24, 8, char, label[3]);\
++ MC_CMD_OP(cmd, 1, 32, 8, char, label[4]);\
++ MC_CMD_OP(cmd, 1, 40, 8, char, label[5]);\
++ MC_CMD_OP(cmd, 1, 48, 8, char, label[6]);\
++ MC_CMD_OP(cmd, 1, 56, 8, char, label[7]);\
++ MC_CMD_OP(cmd, 2, 0, 8, char, label[8]);\
++ MC_CMD_OP(cmd, 2, 8, 8, char, label[9]);\
++ MC_CMD_OP(cmd, 2, 16, 8, char, label[10]);\
++ MC_CMD_OP(cmd, 2, 24, 8, char, label[11]);\
++ MC_CMD_OP(cmd, 2, 32, 8, char, label[12]);\
++ MC_CMD_OP(cmd, 2, 40, 8, char, label[13]);\
++ MC_CMD_OP(cmd, 2, 48, 8, char, label[14]);\
++ MC_CMD_OP(cmd, 2, 56, 8, char, label[15]);\
++ MC_CMD_OP(cmd, 3, 0, 8, char, obj_type[0]);\
++ MC_CMD_OP(cmd, 3, 8, 8, char, obj_type[1]);\
++ MC_CMD_OP(cmd, 3, 16, 8, char, obj_type[2]);\
++ MC_CMD_OP(cmd, 3, 24, 8, char, obj_type[3]);\
++ MC_CMD_OP(cmd, 3, 32, 8, char, obj_type[4]);\
++ MC_CMD_OP(cmd, 3, 40, 8, char, obj_type[5]);\
++ MC_CMD_OP(cmd, 3, 48, 8, char, obj_type[6]);\
++ MC_CMD_OP(cmd, 3, 56, 8, char, obj_type[7]);\
++ MC_CMD_OP(cmd, 4, 0, 8, char, obj_type[8]);\
++ MC_CMD_OP(cmd, 4, 8, 8, char, obj_type[9]);\
++ MC_CMD_OP(cmd, 4, 16, 8, char, obj_type[10]);\
++ MC_CMD_OP(cmd, 4, 24, 8, char, obj_type[11]);\
++ MC_CMD_OP(cmd, 4, 32, 8, char, obj_type[12]);\
++ MC_CMD_OP(cmd, 4, 40, 8, char, obj_type[13]);\
++ MC_CMD_OP(cmd, 4, 48, 8, char, obj_type[14]);\
++ MC_CMD_OP(cmd, 4, 56, 8, char, obj_type[15]);\
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPRC_CMD_SET_OBJ_IRQ(cmd, obj_type, obj_id, irq_index, irq_cfg) \
++do { \
++ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index); \
++ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, irq_cfg->val); \
++ MC_CMD_OP(cmd, 1, 0, 64, uint64_t, irq_cfg->addr);\
++ MC_CMD_OP(cmd, 2, 0, 32, int, irq_cfg->irq_num); \
++ MC_CMD_OP(cmd, 2, 32, 32, int, obj_id); \
++ MC_CMD_OP(cmd, 3, 0, 8, char, obj_type[0]);\
++ MC_CMD_OP(cmd, 3, 8, 8, char, obj_type[1]);\
++ MC_CMD_OP(cmd, 3, 16, 8, char, obj_type[2]);\
++ MC_CMD_OP(cmd, 3, 24, 8, char, obj_type[3]);\
++ MC_CMD_OP(cmd, 3, 32, 8, char, obj_type[4]);\
++ MC_CMD_OP(cmd, 3, 40, 8, char, obj_type[5]);\
++ MC_CMD_OP(cmd, 3, 48, 8, char, obj_type[6]);\
++ MC_CMD_OP(cmd, 3, 56, 8, char, obj_type[7]);\
++ MC_CMD_OP(cmd, 4, 0, 8, char, obj_type[8]);\
++ MC_CMD_OP(cmd, 4, 8, 8, char, obj_type[9]);\
++ MC_CMD_OP(cmd, 4, 16, 8, char, obj_type[10]);\
++ MC_CMD_OP(cmd, 4, 24, 8, char, obj_type[11]);\
++ MC_CMD_OP(cmd, 4, 32, 8, char, obj_type[12]);\
++ MC_CMD_OP(cmd, 4, 40, 8, char, obj_type[13]);\
++ MC_CMD_OP(cmd, 4, 48, 8, char, obj_type[14]);\
++ MC_CMD_OP(cmd, 4, 56, 8, char, obj_type[15]);\
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPRC_CMD_GET_OBJ_IRQ(cmd, obj_type, obj_id, irq_index) \
++do { \
++ MC_CMD_OP(cmd, 0, 0, 32, int, obj_id); \
++ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index); \
++ MC_CMD_OP(cmd, 1, 0, 8, char, obj_type[0]);\
++ MC_CMD_OP(cmd, 1, 8, 8, char, obj_type[1]);\
++ MC_CMD_OP(cmd, 1, 16, 8, char, obj_type[2]);\
++ MC_CMD_OP(cmd, 1, 24, 8, char, obj_type[3]);\
++ MC_CMD_OP(cmd, 1, 32, 8, char, obj_type[4]);\
++ MC_CMD_OP(cmd, 1, 40, 8, char, obj_type[5]);\
++ MC_CMD_OP(cmd, 1, 48, 8, char, obj_type[6]);\
++ MC_CMD_OP(cmd, 1, 56, 8, char, obj_type[7]);\
++ MC_CMD_OP(cmd, 2, 0, 8, char, obj_type[8]);\
++ MC_CMD_OP(cmd, 2, 8, 8, char, obj_type[9]);\
++ MC_CMD_OP(cmd, 2, 16, 8, char, obj_type[10]);\
++ MC_CMD_OP(cmd, 2, 24, 8, char, obj_type[11]);\
++ MC_CMD_OP(cmd, 2, 32, 8, char, obj_type[12]);\
++ MC_CMD_OP(cmd, 2, 40, 8, char, obj_type[13]);\
++ MC_CMD_OP(cmd, 2, 48, 8, char, obj_type[14]);\
++ MC_CMD_OP(cmd, 2, 56, 8, char, obj_type[15]);\
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPRC_RSP_GET_OBJ_IRQ(cmd, type, irq_cfg) \
++do { \
++ MC_RSP_OP(cmd, 0, 0, 32, uint32_t, irq_cfg->val); \
++ MC_RSP_OP(cmd, 1, 0, 64, uint64_t, irq_cfg->addr);\
++ MC_RSP_OP(cmd, 2, 0, 32, int, irq_cfg->irq_num); \
++ MC_RSP_OP(cmd, 2, 32, 32, int, type); \
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPRC_CMD_CONNECT(cmd, endpoint1, endpoint2, cfg) \
++do { \
++ MC_CMD_OP(cmd, 0, 0, 32, int, endpoint1->id); \
++ MC_CMD_OP(cmd, 0, 32, 16, uint16_t, endpoint1->if_id); \
++ MC_CMD_OP(cmd, 1, 0, 32, int, endpoint2->id); \
++ MC_CMD_OP(cmd, 1, 32, 16, uint16_t, endpoint2->if_id); \
++ MC_CMD_OP(cmd, 2, 0, 8, char, endpoint1->type[0]); \
++ MC_CMD_OP(cmd, 2, 8, 8, char, endpoint1->type[1]); \
++ MC_CMD_OP(cmd, 2, 16, 8, char, endpoint1->type[2]); \
++ MC_CMD_OP(cmd, 2, 24, 8, char, endpoint1->type[3]); \
++ MC_CMD_OP(cmd, 2, 32, 8, char, endpoint1->type[4]); \
++ MC_CMD_OP(cmd, 2, 40, 8, char, endpoint1->type[5]); \
++ MC_CMD_OP(cmd, 2, 48, 8, char, endpoint1->type[6]); \
++ MC_CMD_OP(cmd, 2, 56, 8, char, endpoint1->type[7]); \
++ MC_CMD_OP(cmd, 3, 0, 8, char, endpoint1->type[8]); \
++ MC_CMD_OP(cmd, 3, 8, 8, char, endpoint1->type[9]); \
++ MC_CMD_OP(cmd, 3, 16, 8, char, endpoint1->type[10]); \
++ MC_CMD_OP(cmd, 3, 24, 8, char, endpoint1->type[11]); \
++ MC_CMD_OP(cmd, 3, 32, 8, char, endpoint1->type[12]); \
++ MC_CMD_OP(cmd, 3, 40, 8, char, endpoint1->type[13]); \
++ MC_CMD_OP(cmd, 3, 48, 8, char, endpoint1->type[14]); \
++ MC_CMD_OP(cmd, 3, 56, 8, char, endpoint1->type[15]); \
++ MC_CMD_OP(cmd, 4, 0, 32, uint32_t, cfg->max_rate); \
++ MC_CMD_OP(cmd, 4, 32, 32, uint32_t, cfg->committed_rate); \
++ MC_CMD_OP(cmd, 5, 0, 8, char, endpoint2->type[0]); \
++ MC_CMD_OP(cmd, 5, 8, 8, char, endpoint2->type[1]); \
++ MC_CMD_OP(cmd, 5, 16, 8, char, endpoint2->type[2]); \
++ MC_CMD_OP(cmd, 5, 24, 8, char, endpoint2->type[3]); \
++ MC_CMD_OP(cmd, 5, 32, 8, char, endpoint2->type[4]); \
++ MC_CMD_OP(cmd, 5, 40, 8, char, endpoint2->type[5]); \
++ MC_CMD_OP(cmd, 5, 48, 8, char, endpoint2->type[6]); \
++ MC_CMD_OP(cmd, 5, 56, 8, char, endpoint2->type[7]); \
++ MC_CMD_OP(cmd, 6, 0, 8, char, endpoint2->type[8]); \
++ MC_CMD_OP(cmd, 6, 8, 8, char, endpoint2->type[9]); \
++ MC_CMD_OP(cmd, 6, 16, 8, char, endpoint2->type[10]); \
++ MC_CMD_OP(cmd, 6, 24, 8, char, endpoint2->type[11]); \
++ MC_CMD_OP(cmd, 6, 32, 8, char, endpoint2->type[12]); \
++ MC_CMD_OP(cmd, 6, 40, 8, char, endpoint2->type[13]); \
++ MC_CMD_OP(cmd, 6, 48, 8, char, endpoint2->type[14]); \
++ MC_CMD_OP(cmd, 6, 56, 8, char, endpoint2->type[15]); \
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPRC_CMD_DISCONNECT(cmd, endpoint) \
++do { \
++ MC_CMD_OP(cmd, 0, 0, 32, int, endpoint->id); \
++ MC_CMD_OP(cmd, 0, 32, 16, uint16_t, endpoint->if_id); \
++ MC_CMD_OP(cmd, 1, 0, 8, char, endpoint->type[0]); \
++ MC_CMD_OP(cmd, 1, 8, 8, char, endpoint->type[1]); \
++ MC_CMD_OP(cmd, 1, 16, 8, char, endpoint->type[2]); \
++ MC_CMD_OP(cmd, 1, 24, 8, char, endpoint->type[3]); \
++ MC_CMD_OP(cmd, 1, 32, 8, char, endpoint->type[4]); \
++ MC_CMD_OP(cmd, 1, 40, 8, char, endpoint->type[5]); \
++ MC_CMD_OP(cmd, 1, 48, 8, char, endpoint->type[6]); \
++ MC_CMD_OP(cmd, 1, 56, 8, char, endpoint->type[7]); \
++ MC_CMD_OP(cmd, 2, 0, 8, char, endpoint->type[8]); \
++ MC_CMD_OP(cmd, 2, 8, 8, char, endpoint->type[9]); \
++ MC_CMD_OP(cmd, 2, 16, 8, char, endpoint->type[10]); \
++ MC_CMD_OP(cmd, 2, 24, 8, char, endpoint->type[11]); \
++ MC_CMD_OP(cmd, 2, 32, 8, char, endpoint->type[12]); \
++ MC_CMD_OP(cmd, 2, 40, 8, char, endpoint->type[13]); \
++ MC_CMD_OP(cmd, 2, 48, 8, char, endpoint->type[14]); \
++ MC_CMD_OP(cmd, 2, 56, 8, char, endpoint->type[15]); \
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPRC_CMD_GET_CONNECTION(cmd, endpoint1) \
++do { \
++ MC_CMD_OP(cmd, 0, 0, 32, int, endpoint1->id); \
++ MC_CMD_OP(cmd, 0, 32, 16, uint16_t, endpoint1->if_id); \
++ MC_CMD_OP(cmd, 1, 0, 8, char, endpoint1->type[0]); \
++ MC_CMD_OP(cmd, 1, 8, 8, char, endpoint1->type[1]); \
++ MC_CMD_OP(cmd, 1, 16, 8, char, endpoint1->type[2]); \
++ MC_CMD_OP(cmd, 1, 24, 8, char, endpoint1->type[3]); \
++ MC_CMD_OP(cmd, 1, 32, 8, char, endpoint1->type[4]); \
++ MC_CMD_OP(cmd, 1, 40, 8, char, endpoint1->type[5]); \
++ MC_CMD_OP(cmd, 1, 48, 8, char, endpoint1->type[6]); \
++ MC_CMD_OP(cmd, 1, 56, 8, char, endpoint1->type[7]); \
++ MC_CMD_OP(cmd, 2, 0, 8, char, endpoint1->type[8]); \
++ MC_CMD_OP(cmd, 2, 8, 8, char, endpoint1->type[9]); \
++ MC_CMD_OP(cmd, 2, 16, 8, char, endpoint1->type[10]); \
++ MC_CMD_OP(cmd, 2, 24, 8, char, endpoint1->type[11]); \
++ MC_CMD_OP(cmd, 2, 32, 8, char, endpoint1->type[12]); \
++ MC_CMD_OP(cmd, 2, 40, 8, char, endpoint1->type[13]); \
++ MC_CMD_OP(cmd, 2, 48, 8, char, endpoint1->type[14]); \
++ MC_CMD_OP(cmd, 2, 56, 8, char, endpoint1->type[15]); \
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPRC_RSP_GET_CONNECTION(cmd, endpoint2, state) \
++do { \
++ MC_RSP_OP(cmd, 3, 0, 32, int, endpoint2->id); \
++ MC_RSP_OP(cmd, 3, 32, 16, uint16_t, endpoint2->if_id); \
++ MC_RSP_OP(cmd, 4, 0, 8, char, endpoint2->type[0]); \
++ MC_RSP_OP(cmd, 4, 8, 8, char, endpoint2->type[1]); \
++ MC_RSP_OP(cmd, 4, 16, 8, char, endpoint2->type[2]); \
++ MC_RSP_OP(cmd, 4, 24, 8, char, endpoint2->type[3]); \
++ MC_RSP_OP(cmd, 4, 32, 8, char, endpoint2->type[4]); \
++ MC_RSP_OP(cmd, 4, 40, 8, char, endpoint2->type[5]); \
++ MC_RSP_OP(cmd, 4, 48, 8, char, endpoint2->type[6]); \
++ MC_RSP_OP(cmd, 4, 56, 8, char, endpoint2->type[7]); \
++ MC_RSP_OP(cmd, 5, 0, 8, char, endpoint2->type[8]); \
++ MC_RSP_OP(cmd, 5, 8, 8, char, endpoint2->type[9]); \
++ MC_RSP_OP(cmd, 5, 16, 8, char, endpoint2->type[10]); \
++ MC_RSP_OP(cmd, 5, 24, 8, char, endpoint2->type[11]); \
++ MC_RSP_OP(cmd, 5, 32, 8, char, endpoint2->type[12]); \
++ MC_RSP_OP(cmd, 5, 40, 8, char, endpoint2->type[13]); \
++ MC_RSP_OP(cmd, 5, 48, 8, char, endpoint2->type[14]); \
++ MC_RSP_OP(cmd, 5, 56, 8, char, endpoint2->type[15]); \
++ MC_RSP_OP(cmd, 6, 0, 32, int, state); \
++} while (0)
++
++#endif /* _FSL_DPRC_CMD_H */
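/*
 * A minimal sketch of how the serialization macros above are typically
 * consumed by a dprc_* wrapper (here, reading a container's object count).
 * The mc_command / mc_encode_cmd_header() / mc_send_command() names are the
 * usual MC command helpers and are assumed, not defined in this hunk.
 */
int example_dprc_get_obj_count(struct fsl_mc_io *mc_io, uint32_t cmd_flags,
			       uint16_t token, int *obj_count)
{
	struct mc_command cmd = { 0 };
	int err;

	/* pack the command id, flags and authentication token */
	cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_OBJ_COUNT,
					  cmd_flags, token);

	/* send the command to the MC and wait for completion */
	err = mc_send_command(mc_io, &cmd);
	if (err)
		return err;

	/* unpack the response using the DPRC_RSP_* macro defined above */
	DPRC_RSP_GET_OBJ_COUNT(cmd, *obj_count);

	return 0;
}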
+diff --git a/drivers/net/dpaa2/mc/fsl_dprtc.h b/drivers/net/dpaa2/mc/fsl_dprtc.h
+new file mode 100644
+index 0000000..2eb6edc
+--- /dev/null
++++ b/drivers/net/dpaa2/mc/fsl_dprtc.h
+@@ -0,0 +1,434 @@
++/* Copyright 2013-2015 Freescale Semiconductor Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of the above-listed copyright holders nor the
++ * names of any contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
++ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
++ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
++ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
++ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
++ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
++ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
++ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
++ * POSSIBILITY OF SUCH DAMAGE.
++ */
++#ifndef __FSL_DPRTC_H
++#define __FSL_DPRTC_H
++
++/* Data Path Real Time Counter API
++ * Contains initialization APIs and runtime control APIs for RTC
++ */
++
++struct fsl_mc_io;
++
++/**
++ * Number of IRQs
++ */
++#define DPRTC_MAX_IRQ_NUM 1
++#define DPRTC_IRQ_INDEX 0
++
++/**
++ * Interrupt event masks:
++ */
++
++/**
++ * Interrupt event mask indicating that an alarm event has occurred
++ */
++#define DPRTC_EVENT_ALARM 0x40000000
++/**
++ * Interrupt event mask indicating that a periodic pulse event has occurred
++ */
++#define DPRTC_EVENT_PPS 0x08000000
++
++/**
++ * dprtc_open() - Open a control session for the specified object.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @dprtc_id: DPRTC unique ID
++ * @token: Returned token; use in subsequent API calls
++ *
++ * This function can be used to open a control session for an
++ * already created object; an object may have been declared in
++ * the DPL or by calling the dprtc_create function.
++ * This function returns a unique authentication token,
++ * associated with the specific object ID and the specific MC
++ * portal; this token must be used in all subsequent commands for
++ * this specific object
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dprtc_open(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ int dprtc_id,
++ uint16_t *token);
++
++/**
++ * dprtc_close() - Close the control session of the object
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPRTC object
++ *
++ * After this function is called, no further operations are
++ * allowed on the object without opening a new control session.
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dprtc_close(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token);
++
++/**
++ * struct dprtc_cfg - Structure representing DPRTC configuration
++ * @options: placeholder
++ */
++struct dprtc_cfg {
++ uint32_t options;
++};
++
++/**
++ * dprtc_create() - Create the DPRTC object.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @cfg: Configuration structure
++ * @token: Returned token; use in subsequent API calls
++ *
++ * Create the DPRTC object, allocate required resources and
++ * perform required initialization.
++ *
++ * The object can be created either by declaring it in the
++ * DPL file, or by calling this function.
++ * This function returns a unique authentication token,
++ * associated with the specific object ID and the specific MC
++ * portal; this token must be used in all subsequent calls to
++ * this specific object. For objects that are created using the
++ * DPL file, call the dprtc_open() function to get an authentication
++ * token first.
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dprtc_create(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ const struct dprtc_cfg *cfg,
++ uint16_t *token);
++
++/**
++ * dprtc_destroy() - Destroy the DPRTC object and release all its resources.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPRTC object
++ *
++ * Return: '0' on Success; error code otherwise.
++ */
++int dprtc_destroy(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token);
++
++/**
++ * dprtc_set_clock_offset() - Sets the clock's offset
++ * (usually relative to another clock).
++ *
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPRTC object
++ * @offset: New clock offset (in nanoseconds).
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dprtc_set_clock_offset(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ int64_t offset);
++
++/**
++ * dprtc_set_freq_compensation() - Sets a new frequency compensation value.
++ *
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPRTC object
++ * @freq_compensation:
++ * The new frequency compensation value to set.
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dprtc_set_freq_compensation(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint32_t freq_compensation);
++
++/**
++ * dprtc_get_freq_compensation() - Retrieves the frequency compensation value
++ *
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPRTC object
++ * @freq_compensation:
++ * Frequency compensation value
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dprtc_get_freq_compensation(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint32_t *freq_compensation);
++
++/**
++ * dprtc_get_time() - Returns the current RTC time.
++ *
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPRTC object
++ * @time: Current RTC time.
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dprtc_get_time(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint64_t *time);
++
++/**
++ * dprtc_set_time() - Updates current RTC time.
++ *
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPRTC object
++ * @time: New RTC time.
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dprtc_set_time(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint64_t time);
++
++/**
++ * dprtc_set_alarm() - Defines and sets alarm.
++ *
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPRTC object
++ * @time: In nanoseconds, the time when the alarm
++ * should go off - must be a multiple of
++ * 1 microsecond
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dprtc_set_alarm(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint64_t time);
++
++/**
++ * struct dprtc_irq_cfg - IRQ configuration
++ * @addr: Address that must be written to signal a message-based interrupt
++ * @val: Value to write into irq_addr address
++ * @irq_num: A user defined number associated with this IRQ
++ */
++struct dprtc_irq_cfg {
++ uint64_t addr;
++ uint32_t val;
++ int irq_num;
++};
++
++/**
++ * dprtc_set_irq() - Set IRQ information for the DPRTC to trigger an interrupt.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPRTC object
++ * @irq_index: Identifies the interrupt index to configure
++ * @irq_cfg: IRQ configuration
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dprtc_set_irq(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ struct dprtc_irq_cfg *irq_cfg);
++
++/**
++ * dprtc_get_irq() - Get IRQ information from the DPRTC.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPRTC object
++ * @irq_index: The interrupt index to configure
++ * @type: Interrupt type: 0 represents message interrupt
++ * type (both irq_addr and irq_val are valid)
++ * @irq_cfg: IRQ attributes
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dprtc_get_irq(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ int *type,
++ struct dprtc_irq_cfg *irq_cfg);
++
++/**
++ * dprtc_set_irq_enable() - Set overall interrupt state.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPRTC object
++ * @irq_index: The interrupt index to configure
++ * @en: Interrupt state - enable = 1, disable = 0
++ *
++ * Allows GPP software to control when interrupts are generated.
++ * Each interrupt can have up to 32 causes. The enable/disable controls the
++ * overall interrupt state. If the interrupt is disabled, no cause will
++ * assert an interrupt.
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dprtc_set_irq_enable(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ uint8_t en);
++
++/**
++ * dprtc_get_irq_enable() - Get overall interrupt state
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPRTC object
++ * @irq_index: The interrupt index to configure
++ * @en: Returned interrupt state - enable = 1, disable = 0
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dprtc_get_irq_enable(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ uint8_t *en);
++
++/**
++ * dprtc_set_irq_mask() - Set interrupt mask.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPRTC object
++ * @irq_index: The interrupt index to configure
++ * @mask: Event mask to trigger interrupt;
++ * each bit:
++ * 0 = ignore event
++ * 1 = consider event for asserting IRQ
++ *
++ * Every interrupt can have up to 32 causes and the interrupt model supports
++ * masking/unmasking each cause independently
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dprtc_set_irq_mask(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ uint32_t mask);
++
++/**
++ * dprtc_get_irq_mask() - Get interrupt mask.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPRTC object
++ * @irq_index: The interrupt index to configure
++ * @mask: Returned event mask to trigger interrupt
++ *
++ * Every interrupt can have up to 32 causes and the interrupt model supports
++ * masking/unmasking each cause independently
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dprtc_get_irq_mask(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ uint32_t *mask);
++
++/**
++ * dprtc_get_irq_status() - Get the current status of any pending interrupts.
++ *
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPRTC object
++ * @irq_index: The interrupt index to configure
++ * @status: Returned interrupts status - one bit per cause:
++ * 0 = no interrupt pending
++ * 1 = interrupt pending
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dprtc_get_irq_status(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ uint32_t *status);
++
++/**
++ * dprtc_clear_irq_status() - Clear a pending interrupt's status
++ *
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPRTC object
++ * @irq_index: The interrupt index to configure
++ * @status: Bits to clear (W1C) - one bit per cause:
++ * 0 = don't change
++ * 1 = clear status bit
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dprtc_clear_irq_status(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ uint32_t status);
++
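/*
 * A hedged sketch of the usual IRQ bring-up sequence with the DPRTC IRQ
 * calls documented above. The MSI address/data values are illustrative
 * placeholders only; real values come from the platform's interrupt setup.
 */
static int example_dprtc_irq_setup(struct fsl_mc_io *mc_io, uint16_t token)
{
	struct dprtc_irq_cfg irq_cfg = {
		.addr = 0x80000000ULL,	/* illustrative MSI message address */
		.val = 0x1,		/* illustrative MSI message data */
		.irq_num = 0,		/* user-defined cookie */
	};
	int err;

	/* route the DPRTC interrupt to the message-based interrupt target */
	err = dprtc_set_irq(mc_io, 0, token, DPRTC_IRQ_INDEX, &irq_cfg);
	if (err)
		return err;

	/* unmask the alarm and periodic-pulse causes, then enable the IRQ */
	err = dprtc_set_irq_mask(mc_io, 0, token, DPRTC_IRQ_INDEX,
				 DPRTC_EVENT_ALARM | DPRTC_EVENT_PPS);
	if (err)
		return err;

	return dprtc_set_irq_enable(mc_io, 0, token, DPRTC_IRQ_INDEX, 1);
}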
++/**
++ * struct dprtc_attr - Structure representing DPRTC attributes
++ * @id: DPRTC object ID
++ * @version: DPRTC version
++ */
++struct dprtc_attr {
++ int id;
++ /**
++ * struct version - Structure representing DPRTC version
++ * @major: DPRTC major version
++ * @minor: DPRTC minor version
++ */
++ struct {
++ uint16_t major;
++ uint16_t minor;
++ } version;
++};
++
++/**
++ * dprtc_get_attributes() - Retrieve DPRTC attributes.
++ *
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPRTC object
++ * @attr: Returned object's attributes
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dprtc_get_attributes(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ struct dprtc_attr *attr);
++
++#endif /* __FSL_DPRTC_H */
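/*
 * A minimal usage sketch for the DPRTC control API declared above: open an
 * existing object, set and read back the clock, then close the session.
 * The object id (0) and the time value are purely illustrative.
 */
static int example_dprtc_use(struct fsl_mc_io *mc_io)
{
	uint16_t token;
	uint64_t now;
	int err;

	/* open a control session on an already-created DPRTC object */
	err = dprtc_open(mc_io, 0, 0, &token);
	if (err)
		return err;

	/* set the RTC time in nanoseconds, then read it back */
	err = dprtc_set_time(mc_io, 0, token, 1466121600000000000ULL);
	if (!err)
		err = dprtc_get_time(mc_io, 0, token, &now);

	dprtc_close(mc_io, 0, token);
	return err;
}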
+diff --git a/drivers/net/dpaa2/mc/fsl_dprtc_cmd.h b/drivers/net/dpaa2/mc/fsl_dprtc_cmd.h
+new file mode 100644
+index 0000000..aeccece
+--- /dev/null
++++ b/drivers/net/dpaa2/mc/fsl_dprtc_cmd.h
+@@ -0,0 +1,181 @@
++/* Copyright 2013-2015 Freescale Semiconductor Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of the above-listed copyright holders nor the
++ * names of any contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
++ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
++ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
++ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
++ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
++ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
++ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
++ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
++ * POSSIBILITY OF SUCH DAMAGE.
++ */
++#ifndef _FSL_DPRTC_CMD_H
++#define _FSL_DPRTC_CMD_H
++
++/* DPRTC Version */
++#define DPRTC_VER_MAJOR 1
++#define DPRTC_VER_MINOR 0
++
++/* Command IDs */
++#define DPRTC_CMDID_CLOSE 0x800
++#define DPRTC_CMDID_OPEN 0x810
++#define DPRTC_CMDID_CREATE 0x910
++#define DPRTC_CMDID_DESTROY 0x900
++
++#define DPRTC_CMDID_ENABLE 0x002
++#define DPRTC_CMDID_DISABLE 0x003
++#define DPRTC_CMDID_GET_ATTR 0x004
++#define DPRTC_CMDID_RESET 0x005
++#define DPRTC_CMDID_IS_ENABLED 0x006
++
++#define DPRTC_CMDID_SET_IRQ 0x010
++#define DPRTC_CMDID_GET_IRQ 0x011
++#define DPRTC_CMDID_SET_IRQ_ENABLE 0x012
++#define DPRTC_CMDID_GET_IRQ_ENABLE 0x013
++#define DPRTC_CMDID_SET_IRQ_MASK 0x014
++#define DPRTC_CMDID_GET_IRQ_MASK 0x015
++#define DPRTC_CMDID_GET_IRQ_STATUS 0x016
++#define DPRTC_CMDID_CLEAR_IRQ_STATUS 0x017
++
++#define DPRTC_CMDID_SET_CLOCK_OFFSET 0x1d0
++#define DPRTC_CMDID_SET_FREQ_COMPENSATION 0x1d1
++#define DPRTC_CMDID_GET_FREQ_COMPENSATION 0x1d2
++#define DPRTC_CMDID_GET_TIME 0x1d3
++#define DPRTC_CMDID_SET_TIME 0x1d4
++#define DPRTC_CMDID_SET_ALARM 0x1d5
++#define DPRTC_CMDID_SET_PERIODIC_PULSE 0x1d6
++#define DPRTC_CMDID_CLEAR_PERIODIC_PULSE 0x1d7
++#define DPRTC_CMDID_SET_EXT_TRIGGER 0x1d8
++#define DPRTC_CMDID_CLEAR_EXT_TRIGGER 0x1d9
++#define DPRTC_CMDID_GET_EXT_TRIGGER_TIMESTAMP 0x1dA
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPRTC_CMD_OPEN(cmd, dpbp_id) \
++ MC_CMD_OP(cmd, 0, 0, 32, int, dpbp_id)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPRTC_RSP_IS_ENABLED(cmd, en) \
++ MC_RSP_OP(cmd, 0, 0, 1, int, en)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPRTC_CMD_SET_IRQ(cmd, irq_index, irq_cfg) \
++do { \
++ MC_CMD_OP(cmd, 0, 0, 8, uint8_t, irq_index);\
++ MC_CMD_OP(cmd, 0, 32, 32, uint32_t, irq_cfg->val);\
++ MC_CMD_OP(cmd, 1, 0, 64, uint64_t, irq_cfg->addr); \
++ MC_CMD_OP(cmd, 2, 0, 32, int, irq_cfg->irq_num); \
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPRTC_CMD_GET_IRQ(cmd, irq_index) \
++ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPRTC_RSP_GET_IRQ(cmd, type, irq_cfg) \
++do { \
++ MC_RSP_OP(cmd, 0, 0, 32, uint32_t, irq_cfg->val); \
++ MC_RSP_OP(cmd, 1, 0, 64, uint64_t, irq_cfg->addr); \
++ MC_RSP_OP(cmd, 2, 0, 32, int, irq_cfg->irq_num); \
++ MC_RSP_OP(cmd, 2, 32, 32, int, type); \
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPRTC_CMD_SET_IRQ_ENABLE(cmd, irq_index, en) \
++do { \
++ MC_CMD_OP(cmd, 0, 0, 8, uint8_t, en); \
++ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPRTC_CMD_GET_IRQ_ENABLE(cmd, irq_index) \
++ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPRTC_RSP_GET_IRQ_ENABLE(cmd, en) \
++ MC_RSP_OP(cmd, 0, 0, 8, uint8_t, en)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPRTC_CMD_SET_IRQ_MASK(cmd, irq_index, mask) \
++do { \
++ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, mask);\
++ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPRTC_CMD_GET_IRQ_MASK(cmd, irq_index) \
++ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPRTC_RSP_GET_IRQ_MASK(cmd, mask) \
++ MC_RSP_OP(cmd, 0, 0, 32, uint32_t, mask)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPRTC_CMD_GET_IRQ_STATUS(cmd, irq_index, status) \
++do { \
++ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, status);\
++ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\
++} while (0)
++/* cmd, param, offset, width, type, arg_name */
++#define DPRTC_RSP_GET_IRQ_STATUS(cmd, status) \
++ MC_RSP_OP(cmd, 0, 0, 32, uint32_t, status)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPRTC_CMD_CLEAR_IRQ_STATUS(cmd, irq_index, status) \
++do { \
++ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, status); \
++ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPRTC_RSP_GET_ATTRIBUTES(cmd, attr) \
++do { \
++ MC_RSP_OP(cmd, 0, 32, 32, int, attr->id);\
++ MC_RSP_OP(cmd, 1, 0, 16, uint16_t, attr->version.major);\
++ MC_RSP_OP(cmd, 1, 16, 16, uint16_t, attr->version.minor);\
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPRTC_CMD_SET_CLOCK_OFFSET(cmd, offset) \
++ MC_CMD_OP(cmd, 0, 0, 64, int64_t, offset)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPRTC_CMD_SET_FREQ_COMPENSATION(cmd, freq_compensation) \
++ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, freq_compensation)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPRTC_RSP_GET_FREQ_COMPENSATION(cmd, freq_compensation) \
++ MC_RSP_OP(cmd, 0, 0, 32, uint32_t, freq_compensation)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPRTC_RSP_GET_TIME(cmd, time) \
++ MC_RSP_OP(cmd, 0, 0, 64, uint64_t, time)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPRTC_CMD_SET_TIME(cmd, time) \
++ MC_CMD_OP(cmd, 0, 0, 64, uint64_t, time)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPRTC_CMD_SET_ALARM(cmd, time) \
++ MC_CMD_OP(cmd, 0, 0, 64, uint64_t, time)
++
++#endif /* _FSL_DPRTC_CMD_H */
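/*
 * The command macros above are also used in the write direction; a hedged
 * sketch of a SET-style wrapper, assuming the usual mc_command helpers,
 * with no response payload to unpack.
 */
int example_dprtc_set_freq_compensation(struct fsl_mc_io *mc_io,
					uint32_t cmd_flags,
					uint16_t token,
					uint32_t freq_compensation)
{
	struct mc_command cmd = { 0 };

	/* pack the command id, flags, token and the single 32-bit argument */
	cmd.header = mc_encode_cmd_header(DPRTC_CMDID_SET_FREQ_COMPENSATION,
					  cmd_flags, token);
	DPRTC_CMD_SET_FREQ_COMPENSATION(cmd, freq_compensation);

	/* send to the MC; SET commands only return a status */
	return mc_send_command(mc_io, &cmd);
}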
+diff --git a/drivers/net/dpaa2/mc/fsl_dpseci.h b/drivers/net/dpaa2/mc/fsl_dpseci.h
+new file mode 100644
+index 0000000..1dd7215
+--- /dev/null
++++ b/drivers/net/dpaa2/mc/fsl_dpseci.h
+@@ -0,0 +1,647 @@
++/* Copyright 2013-2015 Freescale Semiconductor Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of the above-listed copyright holders nor the
++ * names of any contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
++ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
++ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
++ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
++ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
++ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
++ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
++ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
++ * POSSIBILITY OF SUCH DAMAGE.
++ */
++#ifndef __FSL_DPSECI_H
++#define __FSL_DPSECI_H
++
++/* Data Path SEC Interface API
++ * Contains initialization APIs and runtime control APIs for DPSECI
++ */
++
++struct fsl_mc_io;
++
++/**
++ * General DPSECI macros
++ */
++
++/**
++ * Maximum number of Tx/Rx priorities per DPSECI object
++ */
++#define DPSECI_PRIO_NUM 8
++
++/**
++ * All queues considered; see dpseci_set_rx_queue()
++ */
++#define DPSECI_ALL_QUEUES (uint8_t)(-1)
++
++/**
++ * dpseci_open() - Open a control session for the specified object
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @dpseci_id: DPSECI unique ID
++ * @token: Returned token; use in subsequent API calls
++ *
++ * This function can be used to open a control session for an
++ * already created object; an object may have been declared in
++ * the DPL or by calling the dpseci_create() function.
++ * This function returns a unique authentication token,
++ * associated with the specific object ID and the specific MC
++ * portal; this token must be used in all subsequent commands for
++ * this specific object.
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpseci_open(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ int dpseci_id,
++ uint16_t *token);
++
++/**
++ * dpseci_close() - Close the control session of the object
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPSECI object
++ *
++ * After this function is called, no further operations are
++ * allowed on the object without opening a new control session.
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpseci_close(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token);
++
++/**
++ * struct dpseci_cfg - Structure representing DPSECI configuration
++ * @num_tx_queues: Number of queues towards the SEC
++ * @num_rx_queues: Number of queues back from the SEC
++ * @priorities: Priorities for the SEC hardware processing;
++ *		each entry in the array is the priority of the
++ *		corresponding Tx queue towards the SEC;
++ *		valid priorities are values 1-8
++ */
++struct dpseci_cfg {
++ uint8_t num_tx_queues;
++ uint8_t num_rx_queues;
++ uint8_t priorities[DPSECI_PRIO_NUM];
++};
++
++/**
++ * dpseci_create() - Create the DPSECI object
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @cfg: Configuration structure
++ * @token: Returned token; use in subsequent API calls
++ *
++ * Create the DPSECI object, allocate required resources and
++ * perform required initialization.
++ *
++ * The object can be created either by declaring it in the
++ * DPL file, or by calling this function.
++ *
++ * This function returns a unique authentication token,
++ * associated with the specific object ID and the specific MC
++ * portal; this token must be used in all subsequent calls to
++ * this specific object. For objects that are created using the
++ * DPL file, call the dpseci_open() function to get an authentication
++ * token first.
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpseci_create(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ const struct dpseci_cfg *cfg,
++ uint16_t *token);
++
++/**
++ * dpseci_destroy() - Destroy the DPSECI object and release all its resources.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPSECI object
++ *
++ * Return: '0' on Success; error code otherwise.
++ */
++int dpseci_destroy(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token);
++
++/**
++ * dpseci_enable() - Enable the DPSECI, allow sending and receiving frames.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPSECI object
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpseci_enable(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token);
++
++/**
++ * dpseci_disable() - Disable the DPSECI, stop sending and receiving frames.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPSECI object
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpseci_disable(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token);
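/*
 * A hedged sketch of creating and enabling a DPSECI object with the calls
 * documented above. Two queue pairs with priorities 1 and 2 are an
 * arbitrary illustrative choice (valid priorities are 1-8).
 */
static int example_dpseci_bringup(struct fsl_mc_io *mc_io, uint16_t *token)
{
	struct dpseci_cfg cfg = {
		.num_tx_queues = 2,
		.num_rx_queues = 2,
		.priorities = { 1, 2 },
	};
	int err;

	/* create the object and receive its authentication token */
	err = dpseci_create(mc_io, 0, &cfg, token);
	if (err)
		return err;

	/* allow frame traffic towards and back from the SEC */
	return dpseci_enable(mc_io, 0, *token);
}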
++
++/**
++ * dpseci_is_enabled() - Check if the DPSECI is enabled.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPSECI object
++ * @en: Returns '1' if object is enabled; '0' otherwise
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpseci_is_enabled(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ int *en);
++
++/**
++ * dpseci_reset() - Reset the DPSECI, returns the object to initial state.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPSECI object
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpseci_reset(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token);
++
++/**
++ * struct dpseci_irq_cfg - IRQ configuration
++ * @addr: Address that must be written to signal a message-based interrupt
++ * @val: Value to write into irq_addr address
++ * @irq_num: A user defined number associated with this IRQ
++ */
++struct dpseci_irq_cfg {
++ uint64_t addr;
++ uint32_t val;
++ int irq_num;
++};
++
++/**
++ * dpseci_set_irq() - Set IRQ information for the DPSECI to trigger an interrupt
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPSECI object
++ * @irq_index: Identifies the interrupt index to configure
++ * @irq_cfg: IRQ configuration
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpseci_set_irq(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ struct dpseci_irq_cfg *irq_cfg);
++
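A hedged sketch of message-interrupt setup for irq_index 0; 'msi_addr' and 'msi_data' are hypothetical placeholders for the platform's MSI doorbell address and data word, obtained elsewhere:

	struct dpseci_irq_cfg irq_cfg = {
		.addr    = msi_addr,	/* hypothetical MSI address */
		.val     = msi_data,	/* hypothetical MSI data value */
		.irq_num = 0,
	};
	int err;

	err = dpseci_set_irq(mc_io, 0, token, 0, &irq_cfg);
	if (!err)
		err = dpseci_set_irq_enable(mc_io, 0, token, 0, 1);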
++/**
++ * dpseci_get_irq() - Get IRQ information from the DPSECI
++ *
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPSECI object
++ * @irq_index: The interrupt index to configure
++ * @type: Interrupt type: 0 represents message interrupt
++ * type (both irq_addr and irq_val are valid)
++ * @irq_cfg: IRQ attributes
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpseci_get_irq(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ int *type,
++ struct dpseci_irq_cfg *irq_cfg);
++
++/**
++ * dpseci_set_irq_enable() - Set overall interrupt state.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPSECI object
++ * @irq_index: The interrupt index to configure
++ * @en: Interrupt state - enable = 1, disable = 0
++ *
++ * Allows GPP software to control when interrupts are generated.
++ * Each interrupt can have up to 32 causes. The enable/disable controls the
++ * overall interrupt state. If the interrupt is disabled, no cause will
++ * trigger an interrupt.
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpseci_set_irq_enable(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ uint8_t en);
++
++/**
++ * dpseci_get_irq_enable() - Get overall interrupt state
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPSECI object
++ * @irq_index: The interrupt index to configure
++ * @en: Returned Interrupt state - enable = 1, disable = 0
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpseci_get_irq_enable(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ uint8_t *en);
++
++/**
++ * dpseci_set_irq_mask() - Set interrupt mask.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPSECI object
++ * @irq_index: The interrupt index to configure
++ * @mask: event mask to trigger interrupt;
++ * each bit:
++ * 0 = ignore event
++ * 1 = consider event for asserting IRQ
++ *
++ * Every interrupt can have up to 32 causes and the interrupt model supports
++ * masking/unmasking each cause independently
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpseci_set_irq_mask(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ uint32_t mask);
++
++/**
++ * dpseci_get_irq_mask() - Get interrupt mask.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPSECI object
++ * @irq_index: The interrupt index to configure
++ * @mask: Returned event mask to trigger interrupt
++ *
++ * Every interrupt can have up to 32 causes and the interrupt model supports
++ * masking/unmasking each cause independently
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpseci_get_irq_mask(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ uint32_t *mask);
++
++/**
++ * dpseci_get_irq_status() - Get the current status of any pending interrupts
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPSECI object
++ * @irq_index: The interrupt index to configure
++ * @status: Returned interrupts status - one bit per cause:
++ * 0 = no interrupt pending
++ * 1 = interrupt pending
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpseci_get_irq_status(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ uint32_t *status);
++
++/**
++ * dpseci_clear_irq_status() - Clear a pending interrupt's status
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPSECI object
++ * @irq_index: The interrupt index to configure
++ * @status: bits to clear (W1C) - one bit per cause:
++ * 0 = don't change
++ * 1 = clear status bit
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpseci_clear_irq_status(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ uint32_t status);
++
++/**
++ * struct dpseci_attr - Structure representing DPSECI attributes
++ * @id: DPSECI object ID
++ * @version: DPSECI version
++ * @num_tx_queues: number of queues towards the SEC
++ * @num_rx_queues: number of queues back from the SEC
++ */
++struct dpseci_attr {
++ int id;
++ /**
++ * struct version - DPSECI version
++ * @major: DPSECI major version
++ * @minor: DPSECI minor version
++ */
++ struct {
++ uint16_t major;
++ uint16_t minor;
++ } version;
++ uint8_t num_tx_queues;
++ uint8_t num_rx_queues;
++};
++
++/**
++ * dpseci_get_attributes() - Retrieve DPSECI attributes.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPSECI object
++ * @attr: Returned object's attributes
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpseci_get_attributes(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ struct dpseci_attr *attr);
++
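For illustration only, the attribute call is typically used to discover how many queue pairs to initialize; a sketch under the same assumptions as above:

	struct dpseci_attr attr;
	int err, i;

	err = dpseci_get_attributes(mc_io, 0, token, &attr);
	if (err)
		return err;
	for (i = 0; i < attr.num_rx_queues; i++)
		;	/* per-queue software state would be set up here */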
++/**
++ * enum dpseci_dest - DPSECI destination types
++ * @DPSECI_DEST_NONE: Unassigned destination; The queue is set in parked mode
++ * and does not generate FQDAN notifications; user is expected to
++ * dequeue from the queue based on polling or other user-defined
++ * method
++ * @DPSECI_DEST_DPIO: The queue is set in schedule mode and generates FQDAN
++ * notifications to the specified DPIO; user is expected to dequeue
++ * from the queue only after notification is received
++ * @DPSECI_DEST_DPCON: The queue is set in schedule mode and does not generate
++ * FQDAN notifications, but is connected to the specified DPCON
++ * object; user is expected to dequeue from the DPCON channel
++ */
++enum dpseci_dest {
++ DPSECI_DEST_NONE = 0,
++ DPSECI_DEST_DPIO = 1,
++ DPSECI_DEST_DPCON = 2
++};
++
++/**
++ * struct dpseci_dest_cfg - Structure representing DPSECI destination parameters
++ * @dest_type: Destination type
++ * @dest_id: Either DPIO ID or DPCON ID, depending on the destination type
++ * @priority: Priority selection within the DPIO or DPCON channel; valid values
++ * are 0-1 or 0-7, depending on the number of priorities in that
++ * channel; not relevant for 'DPSECI_DEST_NONE' option
++ */
++struct dpseci_dest_cfg {
++ enum dpseci_dest dest_type;
++ int dest_id;
++ uint8_t priority;
++};
++
++/**
++ * DPSECI queue modification options
++ */
++
++/**
++ * Select to modify the user's context associated with the queue
++ */
++#define DPSECI_QUEUE_OPT_USER_CTX 0x00000001
++
++/**
++ * Select to modify the queue's destination
++ */
++#define DPSECI_QUEUE_OPT_DEST 0x00000002
++
++/**
++ * Select to modify the queue's order preservation
++ */
++#define DPSECI_QUEUE_OPT_ORDER_PRESERVATION 0x00000004
++
++/**
++ * struct dpseci_rx_queue_cfg - DPSECI RX queue configuration
++ * @options: Flags representing the suggested modifications to the queue;
++ * Use any combination of 'DPSECI_QUEUE_OPT_<X>' flags
++ * @order_preservation_en: Order preservation configuration for the rx queue;
++ * valid only if 'DPSECI_QUEUE_OPT_ORDER_PRESERVATION' is contained in 'options'
++ * @user_ctx: User context value provided in the frame descriptor of each
++ * dequeued frame;
++ * valid only if 'DPSECI_QUEUE_OPT_USER_CTX' is contained in 'options'
++ * @dest_cfg: Queue destination parameters;
++ * valid only if 'DPSECI_QUEUE_OPT_DEST' is contained in 'options'
++ */
++struct dpseci_rx_queue_cfg {
++ uint32_t options;
++ int order_preservation_en;
++ uint64_t user_ctx;
++ struct dpseci_dest_cfg dest_cfg;
++};
++
++/**
++ * dpseci_set_rx_queue() - Set Rx queue configuration
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPSECI object
++ * @queue: Select the queue relative to number of
++ * priorities configured at DPSECI creation; use
++ * DPSECI_ALL_QUEUES to configure all Rx queues identically.
++ * @cfg: Rx queue configuration
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpseci_set_rx_queue(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t queue,
++ const struct dpseci_rx_queue_cfg *cfg);
++
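A sketch of pointing Rx queue 0 at a DPIO in schedule mode; 'dpio_id' and 'my_ctx' are hypothetical values the caller would have from its own DPIO and queue bookkeeping:

	struct dpseci_rx_queue_cfg rx_cfg = {
		.options  = DPSECI_QUEUE_OPT_USER_CTX | DPSECI_QUEUE_OPT_DEST,
		.user_ctx = my_ctx,			/* hypothetical 64-bit cookie */
		.dest_cfg = {
			.dest_type = DPSECI_DEST_DPIO,
			.dest_id   = dpio_id,		/* hypothetical DPIO ID */
			.priority  = 0,
		},
	};
	int err = dpseci_set_rx_queue(mc_io, 0, token, 0, &rx_cfg);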
++/**
++ * struct dpseci_rx_queue_attr - Structure representing attributes of Rx queues
++ * @user_ctx: User context value provided in the frame descriptor of each
++ * dequeued frame
++ * @order_preservation_en: Status of the order preservation configuration
++ * on the queue
++ * @dest_cfg: Queue destination configuration
++ * @fqid: Virtual FQID value to be used for dequeue operations
++ */
++struct dpseci_rx_queue_attr {
++ uint64_t user_ctx;
++ int order_preservation_en;
++ struct dpseci_dest_cfg dest_cfg;
++ uint32_t fqid;
++};
++
++/**
++ * dpseci_get_rx_queue() - Retrieve Rx queue attributes.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPSECI object
++ * @queue: Select the queue relative to number of
++ * priorities configured at DPSECI creation
++ * @attr: Returned Rx queue attributes
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpseci_get_rx_queue(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t queue,
++ struct dpseci_rx_queue_attr *attr);
++
++/**
++ * struct dpseci_tx_queue_attr - Structure representing attributes of Tx queues
++ * @fqid: Virtual FQID to be used for sending frames to SEC hardware
++ * @priority: SEC hardware processing priority for the queue
++ */
++struct dpseci_tx_queue_attr {
++ uint32_t fqid;
++ uint8_t priority;
++};
++
++/**
++ * dpseci_get_tx_queue() - Retrieve Tx queue attributes.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPSECI object
++ * @queue: Select the queue relative to number of
++ * priorities configured at DPSECI creation
++ * @attr: Returned Tx queue attributes
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpseci_get_tx_queue(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t queue,
++ struct dpseci_tx_queue_attr *attr);
++
++/**
++ * struct dpseci_sec_attr - Structure representing attributes of the SEC
++ * hardware accelerator
++ * @ip_id: ID for SEC.
++ * @major_rev: Major revision number for SEC.
++ * @minor_rev: Minor revision number for SEC.
++ * @era: SEC Era.
++ * @deco_num: The number of copies of the DECO that are implemented in
++ * this version of SEC.
++ * @zuc_auth_acc_num: The number of copies of ZUCA that are implemented
++ * in this version of SEC.
++ * @zuc_enc_acc_num: The number of copies of ZUCE that are implemented
++ * in this version of SEC.
++ * @snow_f8_acc_num: The number of copies of the SNOW-f8 module that are
++ * implemented in this version of SEC.
++ * @snow_f9_acc_num: The number of copies of the SNOW-f9 module that are
++ * implemented in this version of SEC.
++ * @crc_acc_num: The number of copies of the CRC module that are implemented
++ * in this version of SEC.
++ * @pk_acc_num: The number of copies of the Public Key module that are
++ * implemented in this version of SEC.
++ * @kasumi_acc_num: The number of copies of the Kasumi module that are
++ * implemented in this version of SEC.
++ * @rng_acc_num: The number of copies of the Random Number Generator that are
++ * implemented in this version of SEC.
++ * @md_acc_num: The number of copies of the MDHA (Hashing module) that are
++ * implemented in this version of SEC.
++ * @arc4_acc_num: The number of copies of the ARC4 module that are implemented
++ * in this version of SEC.
++ * @des_acc_num: The number of copies of the DES module that are implemented
++ * in this version of SEC.
++ * @aes_acc_num: The number of copies of the AES module that are implemented
++ * in this version of SEC.
++ */
++struct dpseci_sec_attr {
++ uint16_t ip_id;
++ uint8_t major_rev;
++ uint8_t minor_rev;
++ uint8_t era;
++ uint8_t deco_num;
++ uint8_t zuc_auth_acc_num;
++ uint8_t zuc_enc_acc_num;
++ uint8_t snow_f8_acc_num;
++ uint8_t snow_f9_acc_num;
++ uint8_t crc_acc_num;
++ uint8_t pk_acc_num;
++ uint8_t kasumi_acc_num;
++ uint8_t rng_acc_num;
++ uint8_t md_acc_num;
++ uint8_t arc4_acc_num;
++ uint8_t des_acc_num;
++ uint8_t aes_acc_num;
++};
++
++/**
++ * dpseci_get_sec_attr() - Retrieve SEC accelerator attributes.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPSECI object
++ * @attr: Returned SEC attributes
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpseci_get_sec_attr(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ struct dpseci_sec_attr *attr);
++
++/**
++ * struct dpseci_sec_counters - Structure representing global SEC counters and
++ * not per dpseci counters
++ * @dequeued_requests: Number of Requests Dequeued
++ * @ob_enc_requests: Number of Outbound Encrypt Requests
++ * @ib_dec_requests: Number of Inbound Decrypt Requests
++ * @ob_enc_bytes: Number of Outbound Bytes Encrypted
++ * @ob_prot_bytes: Number of Outbound Bytes Protected
++ * @ib_dec_bytes: Number of Inbound Bytes Decrypted
++ * @ib_valid_bytes: Number of Inbound Bytes Validated
++ */
++struct dpseci_sec_counters {
++ uint64_t dequeued_requests;
++ uint64_t ob_enc_requests;
++ uint64_t ib_dec_requests;
++ uint64_t ob_enc_bytes;
++ uint64_t ob_prot_bytes;
++ uint64_t ib_dec_bytes;
++ uint64_t ib_valid_bytes;
++};
++
++/**
++ * dpseci_get_sec_counters() - Retrieve SEC accelerator counters.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPSECI object
++ * @counters: Returned SEC counters
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpseci_get_sec_counters(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ struct dpseci_sec_counters *counters);
++
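As a usage sketch, the global counters can be sampled periodically to estimate SEC throughput:

	struct dpseci_sec_counters cnt;
	uint64_t reqs = 0, bytes = 0;

	if (!dpseci_get_sec_counters(mc_io, 0, token, &cnt)) {
		reqs  = cnt.dequeued_requests;
		bytes = cnt.ob_enc_bytes + cnt.ib_dec_bytes;
	}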
++#endif /* __FSL_DPSECI_H */
+diff --git a/drivers/net/dpaa2/mc/fsl_dpseci_cmd.h b/drivers/net/dpaa2/mc/fsl_dpseci_cmd.h
+new file mode 100644
+index 0000000..6c0b96e
+--- /dev/null
++++ b/drivers/net/dpaa2/mc/fsl_dpseci_cmd.h
+@@ -0,0 +1,241 @@
++/* Copyright 2013-2015 Freescale Semiconductor Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of the above-listed copyright holders nor the
++ * names of any contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
++ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
++ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
++ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
++ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
++ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
++ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
++ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
++ * POSSIBILITY OF SUCH DAMAGE.
++ */
++#ifndef _FSL_DPSECI_CMD_H
++#define _FSL_DPSECI_CMD_H
++
++/* DPSECI Version */
++#define DPSECI_VER_MAJOR 3
++#define DPSECI_VER_MINOR 1
++
++/* Command IDs */
++#define DPSECI_CMDID_CLOSE 0x800
++#define DPSECI_CMDID_OPEN 0x809
++#define DPSECI_CMDID_CREATE 0x909
++#define DPSECI_CMDID_DESTROY 0x900
++
++#define DPSECI_CMDID_ENABLE 0x002
++#define DPSECI_CMDID_DISABLE 0x003
++#define DPSECI_CMDID_GET_ATTR 0x004
++#define DPSECI_CMDID_RESET 0x005
++#define DPSECI_CMDID_IS_ENABLED 0x006
++
++#define DPSECI_CMDID_SET_IRQ 0x010
++#define DPSECI_CMDID_GET_IRQ 0x011
++#define DPSECI_CMDID_SET_IRQ_ENABLE 0x012
++#define DPSECI_CMDID_GET_IRQ_ENABLE 0x013
++#define DPSECI_CMDID_SET_IRQ_MASK 0x014
++#define DPSECI_CMDID_GET_IRQ_MASK 0x015
++#define DPSECI_CMDID_GET_IRQ_STATUS 0x016
++#define DPSECI_CMDID_CLEAR_IRQ_STATUS 0x017
++
++#define DPSECI_CMDID_SET_RX_QUEUE 0x194
++#define DPSECI_CMDID_GET_RX_QUEUE 0x196
++#define DPSECI_CMDID_GET_TX_QUEUE 0x197
++#define DPSECI_CMDID_GET_SEC_ATTR 0x198
++#define DPSECI_CMDID_GET_SEC_COUNTERS 0x199
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPSECI_CMD_OPEN(cmd, dpseci_id) \
++ MC_CMD_OP(cmd, 0, 0, 32, int, dpseci_id)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPSECI_CMD_CREATE(cmd, cfg) \
++do { \
++ MC_CMD_OP(cmd, 0, 0, 8, uint8_t, cfg->priorities[0]);\
++ MC_CMD_OP(cmd, 0, 8, 8, uint8_t, cfg->priorities[1]);\
++ MC_CMD_OP(cmd, 0, 16, 8, uint8_t, cfg->priorities[2]);\
++ MC_CMD_OP(cmd, 0, 24, 8, uint8_t, cfg->priorities[3]);\
++ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, cfg->priorities[4]);\
++ MC_CMD_OP(cmd, 0, 40, 8, uint8_t, cfg->priorities[5]);\
++ MC_CMD_OP(cmd, 0, 48, 8, uint8_t, cfg->priorities[6]);\
++ MC_CMD_OP(cmd, 0, 56, 8, uint8_t, cfg->priorities[7]);\
++ MC_CMD_OP(cmd, 1, 0, 8, uint8_t, cfg->num_tx_queues);\
++ MC_CMD_OP(cmd, 1, 8, 8, uint8_t, cfg->num_rx_queues);\
++} while (0)
++
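For readers unfamiliar with the MC command convention: each macro packs or unpacks one field into a 64-bit command parameter using the (cmd, param, offset, width, type, arg_name) tuple noted above every definition. A simplified, purely illustrative packer, not the actual MC_CMD_OP() implementation from the MC headers, would look like:

	/* Illustration only: OR 'val' into bits [offset, offset+width)
	 * of the param'th 64-bit word of the command payload. */
	static inline void mc_pack(uint64_t *params, int param, int offset,
				   int width, uint64_t val)
	{
		uint64_t mask = (width < 64) ? ((1ULL << width) - 1) : ~0ULL;

		params[param] |= (val & mask) << offset;
	}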
++/* cmd, param, offset, width, type, arg_name */
++#define DPSECI_RSP_IS_ENABLED(cmd, en) \
++ MC_RSP_OP(cmd, 0, 0, 1, int, en)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPSECI_CMD_SET_IRQ(cmd, irq_index, irq_cfg) \
++do { \
++ MC_CMD_OP(cmd, 0, 0, 8, uint8_t, irq_index);\
++ MC_CMD_OP(cmd, 0, 32, 32, uint32_t, irq_cfg->val);\
++ MC_CMD_OP(cmd, 1, 0, 64, uint64_t, irq_cfg->addr);\
++ MC_CMD_OP(cmd, 2, 0, 32, int, irq_cfg->irq_num); \
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPSECI_CMD_GET_IRQ(cmd, irq_index) \
++ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPSECI_RSP_GET_IRQ(cmd, type, irq_cfg) \
++do { \
++ MC_RSP_OP(cmd, 0, 0, 32, uint32_t, irq_cfg->val); \
++ MC_RSP_OP(cmd, 1, 0, 64, uint64_t, irq_cfg->addr);\
++ MC_RSP_OP(cmd, 2, 0, 32, int, irq_cfg->irq_num); \
++ MC_RSP_OP(cmd, 2, 32, 32, int, type); \
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPSECI_CMD_SET_IRQ_ENABLE(cmd, irq_index, enable_state) \
++do { \
++ MC_CMD_OP(cmd, 0, 0, 8, uint8_t, enable_state); \
++ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index); \
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPSECI_CMD_GET_IRQ_ENABLE(cmd, irq_index) \
++ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPSECI_RSP_GET_IRQ_ENABLE(cmd, enable_state) \
++ MC_RSP_OP(cmd, 0, 0, 8, uint8_t, enable_state)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPSECI_CMD_SET_IRQ_MASK(cmd, irq_index, mask) \
++do { \
++ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, mask); \
++ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index); \
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPSECI_CMD_GET_IRQ_MASK(cmd, irq_index) \
++ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPSECI_RSP_GET_IRQ_MASK(cmd, mask) \
++ MC_RSP_OP(cmd, 0, 0, 32, uint32_t, mask)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPSECI_CMD_GET_IRQ_STATUS(cmd, irq_index, status) \
++do { \
++ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, status);\
++ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPSECI_RSP_GET_IRQ_STATUS(cmd, status) \
++ MC_RSP_OP(cmd, 0, 0, 32, uint32_t, status)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPSECI_CMD_CLEAR_IRQ_STATUS(cmd, irq_index, status) \
++do { \
++ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, status); \
++ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index); \
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPSECI_RSP_GET_ATTR(cmd, attr) \
++do { \
++ MC_RSP_OP(cmd, 0, 0, 32, int, attr->id); \
++ MC_RSP_OP(cmd, 1, 0, 8, uint8_t, attr->num_tx_queues); \
++ MC_RSP_OP(cmd, 1, 8, 8, uint8_t, attr->num_rx_queues); \
++ MC_RSP_OP(cmd, 5, 0, 16, uint16_t, attr->version.major);\
++ MC_RSP_OP(cmd, 5, 16, 16, uint16_t, attr->version.minor);\
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPSECI_CMD_SET_RX_QUEUE(cmd, queue, cfg) \
++do { \
++ MC_CMD_OP(cmd, 0, 0, 32, int, cfg->dest_cfg.dest_id); \
++ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, cfg->dest_cfg.priority); \
++ MC_CMD_OP(cmd, 0, 40, 8, uint8_t, queue); \
++ MC_CMD_OP(cmd, 0, 48, 4, enum dpseci_dest, cfg->dest_cfg.dest_type); \
++ MC_CMD_OP(cmd, 1, 0, 64, uint64_t, cfg->user_ctx); \
++ MC_CMD_OP(cmd, 2, 0, 32, uint32_t, cfg->options);\
++ MC_CMD_OP(cmd, 2, 32, 1, int, cfg->order_preservation_en);\
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPSECI_CMD_GET_RX_QUEUE(cmd, queue) \
++ MC_CMD_OP(cmd, 0, 40, 8, uint8_t, queue)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPSECI_RSP_GET_RX_QUEUE(cmd, attr) \
++do { \
++ MC_RSP_OP(cmd, 0, 0, 32, int, attr->dest_cfg.dest_id);\
++ MC_RSP_OP(cmd, 0, 32, 8, uint8_t, attr->dest_cfg.priority);\
++ MC_RSP_OP(cmd, 0, 48, 4, enum dpseci_dest, attr->dest_cfg.dest_type);\
++ MC_RSP_OP(cmd, 1, 0, 8, uint64_t, attr->user_ctx);\
++ MC_RSP_OP(cmd, 2, 0, 32, uint32_t, attr->fqid);\
++ MC_RSP_OP(cmd, 2, 32, 1, int, attr->order_preservation_en);\
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPSECI_CMD_GET_TX_QUEUE(cmd, queue) \
++ MC_CMD_OP(cmd, 0, 40, 8, uint8_t, queue)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPSECI_RSP_GET_TX_QUEUE(cmd, attr) \
++do { \
++ MC_RSP_OP(cmd, 0, 32, 32, uint32_t, attr->fqid);\
++ MC_RSP_OP(cmd, 1, 0, 8, uint8_t, attr->priority);\
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPSECI_RSP_GET_SEC_ATTR(cmd, attr) \
++do { \
++ MC_RSP_OP(cmd, 0, 0, 16, uint16_t, attr->ip_id);\
++ MC_RSP_OP(cmd, 0, 16, 8, uint8_t, attr->major_rev);\
++ MC_RSP_OP(cmd, 0, 24, 8, uint8_t, attr->minor_rev);\
++ MC_RSP_OP(cmd, 0, 32, 8, uint8_t, attr->era);\
++ MC_RSP_OP(cmd, 1, 0, 8, uint8_t, attr->deco_num);\
++ MC_RSP_OP(cmd, 1, 8, 8, uint8_t, attr->zuc_auth_acc_num);\
++ MC_RSP_OP(cmd, 1, 16, 8, uint8_t, attr->zuc_enc_acc_num);\
++ MC_RSP_OP(cmd, 1, 32, 8, uint8_t, attr->snow_f8_acc_num);\
++ MC_RSP_OP(cmd, 1, 40, 8, uint8_t, attr->snow_f9_acc_num);\
++ MC_RSP_OP(cmd, 1, 48, 8, uint8_t, attr->crc_acc_num);\
++ MC_RSP_OP(cmd, 2, 0, 8, uint8_t, attr->pk_acc_num);\
++ MC_RSP_OP(cmd, 2, 8, 8, uint8_t, attr->kasumi_acc_num);\
++ MC_RSP_OP(cmd, 2, 16, 8, uint8_t, attr->rng_acc_num);\
++ MC_RSP_OP(cmd, 2, 32, 8, uint8_t, attr->md_acc_num);\
++ MC_RSP_OP(cmd, 2, 40, 8, uint8_t, attr->arc4_acc_num);\
++ MC_RSP_OP(cmd, 2, 48, 8, uint8_t, attr->des_acc_num);\
++ MC_RSP_OP(cmd, 2, 56, 8, uint8_t, attr->aes_acc_num);\
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPSECI_RSP_GET_SEC_COUNTERS(cmd, counters) \
++do { \
++ MC_RSP_OP(cmd, 0, 0, 64, uint64_t, counters->dequeued_requests);\
++ MC_RSP_OP(cmd, 1, 0, 64, uint64_t, counters->ob_enc_requests);\
++ MC_RSP_OP(cmd, 2, 0, 64, uint64_t, counters->ib_dec_requests);\
++ MC_RSP_OP(cmd, 3, 0, 64, uint64_t, counters->ob_enc_bytes);\
++ MC_RSP_OP(cmd, 4, 0, 64, uint64_t, counters->ob_prot_bytes);\
++ MC_RSP_OP(cmd, 5, 0, 64, uint64_t, counters->ib_dec_bytes);\
++ MC_RSP_OP(cmd, 6, 0, 64, uint64_t, counters->ib_valid_bytes);\
++} while (0)
++
++#endif /* _FSL_DPSECI_CMD_H */
+diff --git a/drivers/net/dpaa2/mc/fsl_dpsw.h b/drivers/net/dpaa2/mc/fsl_dpsw.h
+new file mode 100644
+index 0000000..9c1bd9d
+--- /dev/null
++++ b/drivers/net/dpaa2/mc/fsl_dpsw.h
+@@ -0,0 +1,2164 @@
++/* Copyright 2013-2015 Freescale Semiconductor Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of the above-listed copyright holders nor the
++ * names of any contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
++ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
++ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
++ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
++ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
++ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
++ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
++ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
++ * POSSIBILITY OF SUCH DAMAGE.
++ */
++#ifndef __FSL_DPSW_H
++#define __FSL_DPSW_H
++
++#include <fsl_net.h>
++
++/* Data Path L2-Switch API
++ * Contains API for handling DPSW topology and functionality
++ */
++
++struct fsl_mc_io;
++
++/**
++ * DPSW general definitions
++ */
++
++/**
++ * Maximum number of traffic class priorities
++ */
++#define DPSW_MAX_PRIORITIES 8
++/**
++ * Maximum number of interfaces
++ */
++#define DPSW_MAX_IF 64
++
++/**
++ * dpsw_open() - Open a control session for the specified object
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @dpsw_id: DPSW unique ID
++ * @token: Returned token; use in subsequent API calls
++ *
++ * This function can be used to open a control session for an
++ * already created object; an object may have been declared in
++ * the DPL or by calling the dpsw_create() function.
++ * This function returns a unique authentication token,
++ * associated with the specific object ID and the specific MC
++ * portal; this token must be used in all subsequent commands for
++ * this specific object
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpsw_open(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ int dpsw_id,
++ uint16_t *token);
++
++/**
++ * dpsw_close() - Close the control session of the object
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPSW object
++ *
++ * After this function is called, no further operations are
++ * allowed on the object without opening a new control session.
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpsw_close(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token);
++
++/**
++ * DPSW options
++ */
++
++/**
++ * Disable flooding
++ */
++#define DPSW_OPT_FLOODING_DIS 0x0000000000000001ULL
++/**
++ * Disable Multicast
++ */
++#define DPSW_OPT_MULTICAST_DIS 0x0000000000000004ULL
++/**
++ * Support control interface
++ */
++#define DPSW_OPT_CTRL_IF_DIS 0x0000000000000010ULL
++/**
++ * Disable flooding metering
++ */
++#define DPSW_OPT_FLOODING_METERING_DIS 0x0000000000000020ULL
++/**
++ * Enable metering
++ */
++#define DPSW_OPT_METERING_EN 0x0000000000000040ULL
++
++/**
++ * enum dpsw_component_type - component type of a bridge
++ * @DPSW_COMPONENT_TYPE_C_VLAN: A C-VLAN component of an
++ * enterprise VLAN bridge or of a Provider Bridge used
++ * to process C-tagged frames
++ * @DPSW_COMPONENT_TYPE_S_VLAN: An S-VLAN component of a
++ * Provider Bridge
++ *
++ */
++enum dpsw_component_type {
++ DPSW_COMPONENT_TYPE_C_VLAN = 0,
++ DPSW_COMPONENT_TYPE_S_VLAN
++};
++
++/**
++ * struct dpsw_cfg - DPSW configuration
++ * @num_ifs: Number of external and internal interfaces
++ * @adv: Advanced parameters; default is all zeros;
++ * use this structure to change default settings
++ */
++struct dpsw_cfg {
++ uint16_t num_ifs;
++ /**
++ * struct adv - Advanced parameters
++ * @options: Enable/Disable DPSW features (bitmap)
++	 * @max_vlans: Maximum Number of VLANs; 0 - indicates default 16
++ * @max_meters_per_if: Number of meters per interface
++	 * @max_fdbs: Maximum Number of FDBs; 0 - indicates default 16
++ * @max_fdb_entries: Number of FDB entries for default FDB table;
++ * 0 - indicates default 1024 entries.
++ * @fdb_aging_time: Default FDB aging time for default FDB table;
++ * 0 - indicates default 300 seconds
++ * @max_fdb_mc_groups: Number of multicast groups in each FDB table;
++ * 0 - indicates default 32
++ * @component_type: Indicates the component type of this bridge
++ */
++ struct {
++ uint64_t options;
++ uint16_t max_vlans;
++ uint8_t max_meters_per_if;
++ uint8_t max_fdbs;
++ uint16_t max_fdb_entries;
++ uint16_t fdb_aging_time;
++ uint16_t max_fdb_mc_groups;
++ enum dpsw_component_type component_type;
++ } adv;
++};
++
++/**
++ * dpsw_create() - Create the DPSW object.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @cfg: Configuration structure
++ * @token: Returned token; use in subsequent API calls
++ *
++ * Create the DPSW object, allocate required resources and
++ * perform required initialization.
++ *
++ * The object can be created either by declaring it in the
++ * DPL file, or by calling this function.
++ *
++ * This function returns a unique authentication token,
++ * associated with the specific object ID and the specific MC
++ * portal; this token must be used in all subsequent calls to
++ * this specific object. For objects that are created using the
++ * DPL file, call dpsw_open() function to get an authentication
++ * token first
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpsw_create(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ const struct dpsw_cfg *cfg,
++ uint16_t *token);
++
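A minimal creation sketch for a 4-interface switch, leaving the advanced parameters zeroed so the documented defaults (16 VLANs, 16 FDBs, 1024 FDB entries, and so on) apply; mc_io is again an assumed, already-initialized portal:

	struct dpsw_cfg sw_cfg = {
		.num_ifs = 4,
		/* .adv left all-zero: use default advanced settings */
	};
	uint16_t sw_token;
	int err = dpsw_create(mc_io, 0, &sw_cfg, &sw_token);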
++/**
++ * dpsw_destroy() - Destroy the DPSW object and release all its resources.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPSW object
++ *
++ * Return: '0' on Success; error code otherwise.
++ */
++int dpsw_destroy(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token);
++
++/**
++ * dpsw_enable() - Enable DPSW functionality
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPSW object
++ *
++ * Return: Completion status. '0' on Success; Error code otherwise.
++ */
++int dpsw_enable(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token);
++
++/**
++ * dpsw_disable() - Disable DPSW functionality
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPSW object
++ *
++ * Return: Completion status. '0' on Success; Error code otherwise.
++ */
++int dpsw_disable(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token);
++
++/**
++ * dpsw_is_enabled() - Check if the DPSW is enabled
++ *
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPSW object
++ * @en: Returns '1' if object is enabled; '0' otherwise
++ *
++ * Return: '0' on Success; Error code otherwise
++ */
++int dpsw_is_enabled(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ int *en);
++
++/**
++ * dpsw_reset() - Reset the DPSW, returns the object to initial state.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPSW object
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpsw_reset(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token);
++
++/**
++ * DPSW IRQ Index and Events
++ */
++
++#define DPSW_IRQ_INDEX_IF 0x0000
++#define DPSW_IRQ_INDEX_L2SW 0x0001
++
++/**
++ * IRQ event - Indicates that the link state changed
++ */
++#define DPSW_IRQ_EVENT_LINK_CHANGED 0x0001
++
++/**
++ * struct dpsw_irq_cfg - IRQ configuration
++ * @addr: Address that must be written to signal a message-based interrupt
++ * @val: Value to write into irq_addr address
++ * @irq_num: A user defined number associated with this IRQ
++ */
++struct dpsw_irq_cfg {
++ uint64_t addr;
++ uint32_t val;
++ int irq_num;
++};
++
++/**
++ * dpsw_set_irq() - Set IRQ information for the DPSW to trigger an interrupt.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPSW object
++ * @irq_index: Identifies the interrupt index to configure
++ * @irq_cfg: IRQ configuration
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpsw_set_irq(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ struct dpsw_irq_cfg *irq_cfg);
++
++/**
++ * dpsw_get_irq() - Get IRQ information from the DPSW
++ *
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPSW object
++ * @irq_index: The interrupt index to configure
++ * @type: Interrupt type: 0 represents message interrupt
++ * type (both irq_addr and irq_val are valid)
++ * @irq_cfg: IRQ attributes
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpsw_get_irq(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ int *type,
++ struct dpsw_irq_cfg *irq_cfg);
++
++/**
++ * dpsw_set_irq_enable() - Set overall interrupt state.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPSW object
++ * @irq_index: The interrupt index to configure
++ * @en: Interrupt state - enable = 1, disable = 0
++ *
++ * Allows GPP software to control when interrupts are generated.
++ * Each interrupt can have up to 32 causes. The enable/disable controls the
++ * overall interrupt state. If the interrupt is disabled, no cause will
++ * trigger an interrupt.
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpsw_set_irq_enable(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ uint8_t en);
++
++/**
++ * dpsw_get_irq_enable() - Get overall interrupt state
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPSW object
++ * @irq_index: The interrupt index to configure
++ * @en: Returned Interrupt state - enable = 1, disable = 0
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpsw_get_irq_enable(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ uint8_t *en);
++
++/**
++ * dpsw_set_irq_mask() - Set interrupt mask.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPSW object
++ * @irq_index: The interrupt index to configure
++ * @mask: event mask to trigger interrupt;
++ * each bit:
++ * 0 = ignore event
++ * 1 = consider event for asserting IRQ
++ *
++ * Every interrupt can have up to 32 causes and the interrupt model supports
++ * masking/unmasking each cause independently
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpsw_set_irq_mask(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ uint32_t mask);
++
++/**
++ * dpsw_get_irq_mask() - Get interrupt mask.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPSW object
++ * @irq_index: The interrupt index to configure
++ * @mask: Returned event mask to trigger interrupt
++ *
++ * Every interrupt can have up to 32 causes and the interrupt model supports
++ * masking/unmasking each cause independently
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpsw_get_irq_mask(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ uint32_t *mask);
++
++/**
++ * dpsw_get_irq_status() - Get the current status of any pending interrupts
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPSW object
++ * @irq_index: The interrupt index to configure
++ * @status: Returned interrupts status - one bit per cause:
++ * 0 = no interrupt pending
++ * 1 = interrupt pending
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpsw_get_irq_status(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ uint32_t *status);
++
++/**
++ * dpsw_clear_irq_status() - Clear a pending interrupt's status
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPSW object
++ * @irq_index: The interrupt index to configure
++ * @status: bits to clear (W1C) - one bit per cause:
++ * 0 = don't change
++ * 1 = clear status bit
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpsw_clear_irq_status(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint8_t irq_index,
++ uint32_t status);
++/**
++ * struct dpsw_attr - Structure representing DPSW attributes
++ * @id: DPSW object ID
++ * @version: DPSW version
++ * @options: Enable/Disable DPSW features
++ * @max_vlans: Maximum Number of VLANs
++ * @max_meters_per_if: Number of meters per interface
++ * @max_fdbs: Maximum Number of FDBs
++ * @max_fdb_entries: Number of FDB entries for default FDB table;
++ * 0 - indicates default 1024 entries.
++ * @fdb_aging_time: Default FDB aging time for default FDB table;
++ * 0 - indicates default 300 seconds
++ * @max_fdb_mc_groups: Number of multicast groups in each FDB table;
++ * 0 - indicates default 32
++ * @mem_size: DPSW frame storage memory size
++ * @num_ifs: Number of interfaces
++ * @num_vlans: Current number of VLANs
++ * @num_fdbs: Current number of FDBs
++ * @component_type: Component type of this bridge
++ */
++struct dpsw_attr {
++ int id;
++ /**
++ * struct version - DPSW version
++ * @major: DPSW major version
++ * @minor: DPSW minor version
++ */
++ struct {
++ uint16_t major;
++ uint16_t minor;
++ } version;
++ uint64_t options;
++ uint16_t max_vlans;
++ uint8_t max_meters_per_if;
++ uint8_t max_fdbs;
++ uint16_t max_fdb_entries;
++ uint16_t fdb_aging_time;
++ uint16_t max_fdb_mc_groups;
++ uint16_t num_ifs;
++ uint16_t mem_size;
++ uint16_t num_vlans;
++ uint8_t num_fdbs;
++ enum dpsw_component_type component_type;
++};
++
++/**
++ * dpsw_get_attributes() - Retrieve DPSW attributes
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPSW object
++ * @attr: Returned DPSW attributes
++ *
++ * Return: Completion status. '0' on Success; Error code otherwise.
++ */
++int dpsw_get_attributes(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ struct dpsw_attr *attr);
++
++/**
++ * dpsw_set_reflection_if() - Set target interface for reflected interfaces.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPSW object
++ * @if_id: Interface Id
++ *
++ * Only one reflection receive interface is allowed per switch
++ *
++ * Return: Completion status. '0' on Success; Error code otherwise.
++ */
++int dpsw_set_reflection_if(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint16_t if_id);
++
++/**
++ * enum dpsw_action - Action selection for special/control frames
++ * @DPSW_ACTION_DROP: Drop frame
++ * @DPSW_ACTION_REDIRECT: Redirect frame to control port
++ */
++enum dpsw_action {
++ DPSW_ACTION_DROP = 0,
++ DPSW_ACTION_REDIRECT = 1
++};
++
++/**
++ * Enable auto-negotiation
++ */
++#define DPSW_LINK_OPT_AUTONEG 0x0000000000000001ULL
++/**
++ * Enable half-duplex mode
++ */
++#define DPSW_LINK_OPT_HALF_DUPLEX 0x0000000000000002ULL
++/**
++ * Enable pause frames
++ */
++#define DPSW_LINK_OPT_PAUSE 0x0000000000000004ULL
++/**
++ * Enable a-symmetric pause frames
++ */
++#define DPSW_LINK_OPT_ASYM_PAUSE 0x0000000000000008ULL
++
++/**
++ * struct dpsw_link_cfg - Structure representing DPSW link configuration
++ * @rate: Rate
++ * @options: Mask of available options; use 'DPSW_LINK_OPT_<X>' values
++ */
++struct dpsw_link_cfg {
++ uint32_t rate;
++ uint64_t options;
++};
++
++/**
++ * dpsw_if_set_link_cfg() - set the link configuration.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPSW object
++ * @if_id: interface id
++ * @cfg: Link configuration
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpsw_if_set_link_cfg(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint16_t if_id,
++ struct dpsw_link_cfg *cfg);
++/**
++ * struct dpsw_link_state - Structure representing DPSW link state
++ * @rate: Rate
++ * @options: Mask of available options; use 'DPSW_LINK_OPT_<X>' values
++ * @up: 0 - covers two cases: down and disconnected, 1 - up
++ */
++struct dpsw_link_state {
++ uint32_t rate;
++ uint64_t options;
++ int up;
++};
++
++/**
++ * dpsw_if_get_link_state() - Return the link state
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPSW object
++ * @if_id: interface id
++ * @state: link state 1 - linkup, 0 - link down or disconnected
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpsw_if_get_link_state(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint16_t if_id,
++ struct dpsw_link_state *state);
++
++/**
++ * dpsw_if_set_flooding() - Enable/disable flooding for a particular interface
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPSW object
++ * @if_id: Interface Identifier
++ * @en: 1 - enable, 0 - disable
++ *
++ * Return: Completion status. '0' on Success; Error code otherwise.
++ */
++int dpsw_if_set_flooding(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint16_t if_id,
++ int en);
++
++/**
++ * dpsw_if_set_broadcast() - Enable/disable broadcast for a particular interface
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPSW object
++ * @if_id: Interface Identifier
++ * @en: 1 - enable, 0 - disable
++ *
++ * Return: Completion status. '0' on Success; Error code otherwise.
++ */
++int dpsw_if_set_broadcast(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint16_t if_id,
++ int en);
++
++/**
++ * dpsw_if_set_multicast() - Enable/disable multicast for a particular interface
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPSW object
++ * @if_id: Interface Identifier
++ * @en: 1 - enable, 0 - disable
++ *
++ * Return: Completion status. '0' on Success; Error code otherwise.
++ */
++int dpsw_if_set_multicast(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint16_t if_id,
++ int en);
++
++/**
++ * struct dpsw_tci_cfg - Tag Control Information (TCI) configuration
++ * @pcp: Priority Code Point (PCP): a 3-bit field which refers
++ * to the IEEE 802.1p priority
++ * @dei: Drop Eligible Indicator (DEI): a 1-bit field. May be used
++ * separately or in conjunction with PCP to indicate frames
++ * eligible to be dropped in the presence of congestion
++ * @vlan_id: VLAN Identifier (VID): a 12-bit field specifying the VLAN
++ * to which the frame belongs. The hexadecimal values
++ * of 0x000 and 0xFFF are reserved;
++ * all other values may be used as VLAN identifiers,
++ * allowing up to 4,094 VLANs
++ */
++struct dpsw_tci_cfg {
++ uint8_t pcp;
++ uint8_t dei;
++ uint16_t vlan_id;
++};
++
++/**
++ * dpsw_if_set_tci() - Set default VLAN Tag Control Information (TCI)
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPSW object
++ * @if_id: Interface Identifier
++ * @cfg: Tag Control Information Configuration
++ *
++ * Return: Completion status. '0' on Success; Error code otherwise.
++ */
++int dpsw_if_set_tci(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint16_t if_id,
++ const struct dpsw_tci_cfg *cfg);
++
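A sketch of assigning a default TCI (PVID 100, PCP 3) to interface 1 of the switch; 'sw_token' is assumed from dpsw_create() or dpsw_open():

	struct dpsw_tci_cfg tci = {
		.pcp     = 3,
		.dei     = 0,
		.vlan_id = 100,
	};
	int err = dpsw_if_set_tci(mc_io, 0, sw_token, 1, &tci);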
++/**
++ * dpsw_if_get_tci() - Get default VLAN Tag Control Information (TCI)
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPSW object
++ * @if_id: Interface Identifier
++ * @cfg: Tag Control Information Configuration
++ *
++ * Return: Completion status. '0' on Success; Error code otherwise.
++ */
++int dpsw_if_get_tci(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint16_t if_id,
++ struct dpsw_tci_cfg *cfg);
++
++/**
++ * enum dpsw_stp_state - Spanning Tree Protocol (STP) states
++ * @DPSW_STP_STATE_BLOCKING: Blocking state
++ * @DPSW_STP_STATE_LISTENING: Listening state
++ * @DPSW_STP_STATE_LEARNING: Learning state
++ * @DPSW_STP_STATE_FORWARDING: Forwarding state
++ *
++ */
++enum dpsw_stp_state {
++ DPSW_STP_STATE_BLOCKING = 0,
++ DPSW_STP_STATE_LISTENING = 1,
++ DPSW_STP_STATE_LEARNING = 2,
++ DPSW_STP_STATE_FORWARDING = 3
++};
++
++/**
++ * struct dpsw_stp_cfg - Spanning Tree Protocol (STP) Configuration
++ * @vlan_id: VLAN ID STP state
++ * @state: STP state
++ */
++struct dpsw_stp_cfg {
++ uint16_t vlan_id;
++ enum dpsw_stp_state state;
++};
++
++/**
++ * dpsw_if_set_stp() - Function sets Spanning Tree Protocol (STP) state.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPSW object
++ * @if_id: Interface Identifier
++ * @cfg: STP State configuration parameters
++ *
++ * The following STP states are supported -
++ * blocking, listening, learning, forwarding and disabled.
++ *
++ * Return: Completion status. '0' on Success; Error code otherwise.
++ */
++int dpsw_if_set_stp(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint16_t if_id,
++ const struct dpsw_stp_cfg *cfg);
++
++/**
++ * enum dpsw_accepted_frames - Types of frames to accept
++ * @DPSW_ADMIT_ALL: The device accepts VLAN tagged, untagged and
++ * priority tagged frames
++ * @DPSW_ADMIT_ONLY_VLAN_TAGGED: The device discards untagged frames or
++ * Priority-Tagged frames received on this interface.
++ *
++ */
++enum dpsw_accepted_frames {
++ DPSW_ADMIT_ALL = 1,
++ DPSW_ADMIT_ONLY_VLAN_TAGGED = 3
++};
++
++/**
++ * struct dpsw_accepted_frames_cfg - Types of frames to accept configuration
++ * @type: Defines ingress accepted frames
++ * @unaccept_act: When a frame is not accepted, it may be discarded or
++ * redirected to control interface depending on this mode
++ */
++struct dpsw_accepted_frames_cfg {
++ enum dpsw_accepted_frames type;
++ enum dpsw_action unaccept_act;
++};
++
++/**
++ * dpsw_if_set_accepted_frames()
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPSW object
++ * @if_id: Interface Identifier
++ * @cfg: Frame types configuration
++ *
++ * When set to admit_only_vlan_tagged, the device discards untagged
++ * frames or Priority-Tagged frames received on this interface.
++ * When set to admit_only_untagged, untagged frames or Priority-Tagged
++ * frames received on this interface are accepted and assigned to a VID
++ * based on the PVID and VID set for this interface.
++ * When set to admit_all, the device accepts VLAN tagged, untagged
++ * and priority tagged frames.
++ * The default is admit_all.
++ *
++ * Return: Completion status. '0' on Success; Error code otherwise.
++ */
++int dpsw_if_set_accepted_frames(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint16_t if_id,
++ const struct dpsw_accepted_frames_cfg *cfg);
++
++/**
++ * dpsw_if_set_accept_all_vlan()
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPSW object
++ * @if_id: Interface Identifier
++ * @accept_all: Accept or drop frames having different VLAN
++ *
++ * When accept_all is FALSE, the device discards incoming frames for
++ * VLANs that do not include this interface in their member set.
++ * When accept_all is TRUE, the interface accepts all incoming frames.
++ *
++ * Return: Completion status. '0' on Success; Error code otherwise.
++ */
++int dpsw_if_set_accept_all_vlan(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint16_t if_id,
++ int accept_all);
++
++/**
++ * enum dpsw_counter - Counters types
++ * @DPSW_CNT_ING_FRAME: Counts ingress frames
++ * @DPSW_CNT_ING_BYTE: Counts ingress bytes
++ * @DPSW_CNT_ING_FLTR_FRAME: Counts filtered ingress frames
++ * @DPSW_CNT_ING_FRAME_DISCARD: Counts discarded ingress frames
++ * @DPSW_CNT_ING_MCAST_FRAME: Counts ingress multicast frames
++ * @DPSW_CNT_ING_MCAST_BYTE: Counts ingress multicast bytes
++ * @DPSW_CNT_ING_BCAST_FRAME: Counts ingress broadcast frames
++ * @DPSW_CNT_ING_BCAST_BYTES: Counts ingress broadcast bytes
++ * @DPSW_CNT_EGR_FRAME: Counts egress frames
++ * @DPSW_CNT_EGR_BYTE: Counts egress bytes
++ * @DPSW_CNT_EGR_FRAME_DISCARD: Counts discarded egress frames
++ * @DPSW_CNT_EGR_STP_FRAME_DISCARD: Counts egress STP discarded frames
++ */
++enum dpsw_counter {
++ DPSW_CNT_ING_FRAME = 0x0,
++ DPSW_CNT_ING_BYTE = 0x1,
++ DPSW_CNT_ING_FLTR_FRAME = 0x2,
++ DPSW_CNT_ING_FRAME_DISCARD = 0x3,
++ DPSW_CNT_ING_MCAST_FRAME = 0x4,
++ DPSW_CNT_ING_MCAST_BYTE = 0x5,
++ DPSW_CNT_ING_BCAST_FRAME = 0x6,
++ DPSW_CNT_ING_BCAST_BYTES = 0x7,
++ DPSW_CNT_EGR_FRAME = 0x8,
++ DPSW_CNT_EGR_BYTE = 0x9,
++ DPSW_CNT_EGR_FRAME_DISCARD = 0xa,
++ DPSW_CNT_EGR_STP_FRAME_DISCARD = 0xb
++};
++
++/**
++ * dpsw_if_get_counter() - Get specific counter of particular interface
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPSW object
++ * @if_id: Interface Identifier
++ * @type: Counter type
++ * @counter: return value
++ *
++ * Return: Completion status. '0' on Success; Error code otherwise.
++ */
++int dpsw_if_get_counter(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint16_t if_id,
++ enum dpsw_counter type,
++ uint64_t *counter);
++
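Reading a per-interface counter is a single call; for example (a sketch), the ingress frame count of interface 1:

	uint64_t frames = 0;
	int err = dpsw_if_get_counter(mc_io, 0, sw_token, 1,
				      DPSW_CNT_ING_FRAME, &frames);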
++/**
++ * dpsw_if_set_counter() - Set specific counter of particular interface
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPSW object
++ * @if_id: Interface Identifier
++ * @type: Counter type
++ * @counter: New counter value
++ *
++ * Return: Completion status. '0' on Success; Error code otherwise.
++ */
++int dpsw_if_set_counter(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint16_t if_id,
++ enum dpsw_counter type,
++ uint64_t counter);
++
++/**
++ * Maximum number of TC
++ */
++#define DPSW_MAX_TC 8
++
++/**
++ * enum dpsw_priority_selector - User priority
++ * @DPSW_UP_PCP: Priority Code Point (PCP): a 3-bit field which
++ * refers to the IEEE 802.1p priority.
++ * @DPSW_UP_DSCP: Differentiated Services Code Point (DSCP): a 6-bit
++ * field from the IP header
++ *
++ */
++enum dpsw_priority_selector {
++ DPSW_UP_PCP = 0,
++ DPSW_UP_DSCP = 1
++};
++
++/**
++ * enum dpsw_schedule_mode - Traffic classes scheduling
++ * @DPSW_SCHED_STRICT_PRIORITY: schedule strict priority
++ * @DPSW_SCHED_WEIGHTED: weighted scheduling based on a token-bucket algorithm
++ */
++enum dpsw_schedule_mode {
++ DPSW_SCHED_STRICT_PRIORITY,
++ DPSW_SCHED_WEIGHTED
++};
++
++/**
++ * struct dpsw_tx_schedule_cfg - traffic class configuration
++ * @mode: Strict or weight-based scheduling
++ * @delta_bandwidth: weighted bandwidth in the range from 100 to 10000
++ */
++struct dpsw_tx_schedule_cfg {
++ enum dpsw_schedule_mode mode;
++ uint16_t delta_bandwidth;
++};
++
++/**
++ * struct dpsw_tx_selection_cfg - Mapping user priority into traffic
++ * class configuration
++ * @priority_selector: Source for user priority regeneration
++ * @tc_id: The Regenerated User priority that the incoming
++ * User Priority is mapped to for this interface
++ * @tc_sched: Traffic classes configuration
++ */
++struct dpsw_tx_selection_cfg {
++ enum dpsw_priority_selector priority_selector;
++ uint8_t tc_id[DPSW_MAX_PRIORITIES];
++ struct dpsw_tx_schedule_cfg tc_sched[DPSW_MAX_TC];
++};
++
++/**
++ * dpsw_if_set_tx_selection() - Map frame fields to traffic classes
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPSW object
++ * @if_id: Interface Identifier
++ * @cfg: Traffic class mapping configuration
++ *
++ * This function maps frame fields (DSCP, PCP) to a traffic class.
++ * A traffic class is a number in the range from 0 to 7.
++ *
++ * Return: Completion status. '0' on Success; Error code otherwise.
++ */
++int dpsw_if_set_tx_selection(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint16_t if_id,
++ const struct dpsw_tx_selection_cfg *cfg);
++
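A sketch mapping PCP values 1:1 onto traffic classes with strict-priority scheduling on interface 1:

	struct dpsw_tx_selection_cfg sel = {
		.priority_selector = DPSW_UP_PCP,
	};
	int i, err;

	for (i = 0; i < DPSW_MAX_PRIORITIES; i++)
		sel.tc_id[i] = i;
	for (i = 0; i < DPSW_MAX_TC; i++)
		sel.tc_sched[i].mode = DPSW_SCHED_STRICT_PRIORITY;
	err = dpsw_if_set_tx_selection(mc_io, 0, sw_token, 1, &sel);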
++/**
++ * enum dpsw_reflection_filter - Filter type for frames to reflect
++ * @DPSW_REFLECTION_FILTER_INGRESS_ALL: Reflect all frames
++ * @DPSW_REFLECTION_FILTER_INGRESS_VLAN: Reflect only frames belong to
++ * particular VLAN defined by vid parameter
++ *
++ */
++enum dpsw_reflection_filter {
++ DPSW_REFLECTION_FILTER_INGRESS_ALL = 0,
++ DPSW_REFLECTION_FILTER_INGRESS_VLAN = 1
++};
++
++/**
++ * struct dpsw_reflection_cfg - Structure representing reflection information
++ * @filter: Filter type for frames to reflect
++ * @vlan_id: VLAN ID to reflect; valid only when filter type is
++ * DPSW_REFLECTION_FILTER_INGRESS_VLAN
++ */
++struct dpsw_reflection_cfg {
++ enum dpsw_reflection_filter filter;
++ uint16_t vlan_id;
++};
++
++/**
++ * dpsw_if_add_reflection() - Identify interface to be reflected or mirrored
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPSW object
++ * @if_id: Interface Identifier
++ * @cfg: Reflection configuration
++ *
++ * Return: Completion status. '0' on Success; Error code otherwise.
++ */
++int dpsw_if_add_reflection(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint16_t if_id,
++ const struct dpsw_reflection_cfg *cfg);
++
++/**
++ * dpsw_if_remove_reflection() - Remove interface to be reflected or mirrored
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPSW object
++ * @if_id: Interface Identifier
++ * @cfg: Reflection configuration
++ *
++ * Return: Completion status. '0' on Success; Error code otherwise.
++ */
++int dpsw_if_remove_reflection(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint16_t if_id,
++ const struct dpsw_reflection_cfg *cfg);
++
++/**
++ * enum dpsw_metering_mode - Metering modes
++ * @DPSW_METERING_MODE_NONE: metering disabled
++ * @DPSW_METERING_MODE_RFC2698: RFC 2698
++ * @DPSW_METERING_MODE_RFC4115: RFC 4115
++ */
++enum dpsw_metering_mode {
++ DPSW_METERING_MODE_NONE = 0,
++ DPSW_METERING_MODE_RFC2698,
++ DPSW_METERING_MODE_RFC4115
++};
++
++/**
++ * enum dpsw_metering_unit - Metering count
++ * @DPSW_METERING_UNIT_BYTES: count bytes
++ * @DPSW_METERING_UNIT_FRAMES: count frames
++ */
++enum dpsw_metering_unit {
++ DPSW_METERING_UNIT_BYTES = 0,
++ DPSW_METERING_UNIT_FRAMES
++};
++
++/**
++ * struct dpsw_metering_cfg - Metering configuration
++ * @mode: metering modes
++ * @units: Bytes or frame units
++ * @cir: Committed information rate (CIR) in Kbits/s
++ * @eir: Peak information rate (PIR) Kbit/s rfc2698
++ * Excess information rate (EIR) Kbit/s rfc4115
++ * @cbs: Committed burst size (CBS) in bytes
++ * @ebs: Peak burst size (PBS) in bytes for rfc2698
++ * Excess burst size (EBS) in bytes for rfc4115
++ *
++ */
++struct dpsw_metering_cfg {
++ enum dpsw_metering_mode mode;
++ enum dpsw_metering_unit units;
++ uint32_t cir;
++ uint32_t eir;
++ uint32_t cbs;
++ uint32_t ebs;
++};
++
++/**
++ * dpsw_if_set_flooding_metering() - Set flooding metering
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPSW object
++ * @if_id: Interface Identifier
++ * @cfg: Metering parameters
++ *
++ * Return: Completion status. '0' on Success; Error code otherwise.
++ */
++int dpsw_if_set_flooding_metering(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint16_t if_id,
++ const struct dpsw_metering_cfg *cfg);
++
++/**
++ * dpsw_if_set_metering() - Set per traffic class metering for an interface
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPSW object
++ * @if_id: Interface Identifier
++ * @tc_id: Traffic class ID
++ * @cfg: Metering parameters
++ *
++ * Return: Completion status. '0' on Success; Error code otherwise.
++ */
++int dpsw_if_set_metering(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint16_t if_id,
++ uint8_t tc_id,
++ const struct dpsw_metering_cfg *cfg);
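To make the CIR/EIR/CBS/EBS fields concrete, the sketch below applies an RFC 2698 two-rate meter to one traffic class; the numbers are arbitrary examples and the helper name, the cmd_flags value of 0 and the token are assumptions:

	/* 10 Mbit/s committed, 20 Mbit/s peak, 64 KiB bursts, byte-based. */
	static int dpsw_if_set_trtcm_example(struct fsl_mc_io *mc_io, uint16_t token,
					     uint16_t if_id, uint8_t tc_id)
	{
		const struct dpsw_metering_cfg cfg = {
			.mode  = DPSW_METERING_MODE_RFC2698,
			.units = DPSW_METERING_UNIT_BYTES,
			.cir   = 10 * 1000,	/* Kbit/s */
			.eir   = 20 * 1000,	/* PIR for RFC 2698, Kbit/s */
			.cbs   = 64 * 1024,	/* bytes */
			.ebs   = 64 * 1024,	/* PBS for RFC 2698, bytes */
		};

		return dpsw_if_set_metering(mc_io, 0, token, if_id, tc_id, &cfg);
	}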
++
++/**
++ * enum dpsw_early_drop_unit - DPSW early drop unit
++ * @DPSW_EARLY_DROP_UNIT_BYTE: count bytes
++ * @DPSW_EARLY_DROP_UNIT_FRAMES: count frames
++ */
++enum dpsw_early_drop_unit {
++ DPSW_EARLY_DROP_UNIT_BYTE = 0,
++ DPSW_EARLY_DROP_UNIT_FRAMES
++};
++
++/**
++ * enum dpsw_early_drop_mode - DPSW early drop mode
++ * @DPSW_EARLY_DROP_MODE_NONE: early drop is disabled
++ * @DPSW_EARLY_DROP_MODE_TAIL: early drop in taildrop mode
++ * @DPSW_EARLY_DROP_MODE_WRED: early drop in WRED mode
++ */
++enum dpsw_early_drop_mode {
++ DPSW_EARLY_DROP_MODE_NONE = 0,
++ DPSW_EARLY_DROP_MODE_TAIL,
++ DPSW_EARLY_DROP_MODE_WRED
++};
++
++/**
++ * struct dpsw_wred_cfg - WRED configuration
++ * @max_threshold: maximum threshold at which packets may be discarded. Above
++ * this threshold all packets are discarded; must be less than 2^39;
++ * approximated to be expressed as (x+256)*2^(y-1) due to HW
++ * implementation.
++ * @min_threshold: minimum threshold at which packets may be discarded
++ * @drop_probability: probability that a packet will be discarded (1-100,
++ * associated with the maximum threshold)
++ */
++struct dpsw_wred_cfg {
++ uint64_t min_threshold;
++ uint64_t max_threshold;
++ uint8_t drop_probability;
++};
++
++/**
++ * struct dpsw_early_drop_cfg - early-drop configuration
++ * @drop_mode: drop mode
++ * @units: count units
++ * @yellow: WRED - 'yellow' configuration
++ * @green: WRED - 'green' configuration
++ * @tail_drop_threshold: tail drop threshold
++ */
++struct dpsw_early_drop_cfg {
++ enum dpsw_early_drop_mode drop_mode;
++ enum dpsw_early_drop_unit units;
++ struct dpsw_wred_cfg yellow;
++ struct dpsw_wred_cfg green;
++ uint32_t tail_drop_threshold;
++};
++
++/**
++ * dpsw_prepare_early_drop() - Prepare an early-drop configuration for setting on an interface
++ * @cfg: Early-drop configuration
++ * @early_drop_buf: Zeroed 256 bytes of memory before mapping it to DMA
++ *
++ * This function has to be called before dpsw_if_set_early_drop()
++ *
++ */
++void dpsw_prepare_early_drop(const struct dpsw_early_drop_cfg *cfg,
++ uint8_t *early_drop_buf);
++
++/**
++ * dpsw_if_set_early_drop() - Set interface traffic class early-drop
++ * configuration
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPSW object
++ * @if_id: Interface Identifier
++ * @tc_id: Traffic class selection (0-7)
++ * @early_drop_iova: I/O virtual address of 64 bytes;
++ * Must be cacheline-aligned and DMA-able memory
++ *
++ * warning: Before calling this function, call dpsw_prepare_early_drop()
++ * to prepare the early_drop_iova parameter
++ *
++ * Return: '0' on Success; error code otherwise.
++ */
++int dpsw_if_set_early_drop(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint16_t if_id,
++ uint8_t tc_id,
++ uint64_t early_drop_iova);
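The prepare/set split above means the caller owns the DMA buffer. A minimal WRED sketch, assuming the caller already has a zeroed, cacheline-aligned, DMA-able buffer and its I/O virtual address (the virtual-to-IOVA mapping is platform specific and not shown); the helper name, the threshold values and the cmd_flags value of 0 are illustrative:

	static int dpsw_if_set_wred_example(struct fsl_mc_io *mc_io, uint16_t token,
					    uint16_t if_id, uint8_t tc_id,
					    uint8_t *early_drop_buf,
					    uint64_t early_drop_iova)
	{
		struct dpsw_early_drop_cfg cfg = {
			.drop_mode = DPSW_EARLY_DROP_MODE_WRED,
			.units     = DPSW_EARLY_DROP_UNIT_BYTE,
			.green  = { .min_threshold = 32 * 1024,
				    .max_threshold = 64 * 1024,
				    .drop_probability = 50 },
			.yellow = { .min_threshold = 16 * 1024,
				    .max_threshold = 32 * 1024,
				    .drop_probability = 80 },
		};

		/* Serialize the configuration into the caller's buffer ... */
		dpsw_prepare_early_drop(&cfg, early_drop_buf);
		/* ... then point the MC at the DMA-mapped copy. */
		return dpsw_if_set_early_drop(mc_io, 0, token, if_id, tc_id,
					      early_drop_iova);
	}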
++
++/**
++ * struct dpsw_custom_tpid_cfg - Structure representing tag Protocol identifier
++ * @tpid: An additional tag protocol identifier
++ */
++struct dpsw_custom_tpid_cfg {
++ uint16_t tpid;
++};
++
++/**
++ * dpsw_add_custom_tpid() - Configure a distinct Ethernet type (TPID) value
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPSW object
++ * @cfg: Tag Protocol identifier
++ *
++ * Configures a distinct Ethernet type value (or TPID value)
++ * to indicate a VLAN tag in addition to the common
++ * TPID values 0x8100 and 0x88A8.
++ * Two additional TPIDs are supported.
++ *
++ * Return: Completion status. '0' on Success; Error code otherwise.
++ */
++int dpsw_add_custom_tpid(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ const struct dpsw_custom_tpid_cfg *cfg);
++
++/**
++ * dpsw_remove_custom_tpid() - Remove a distinct Ethernet type (TPID) value
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPSW object
++ * @cfg: Tag Protocol identifier
++ *
++ * Return: Completion status. '0' on Success; Error code otherwise.
++ */
++int dpsw_remove_custom_tpid(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ const struct dpsw_custom_tpid_cfg *cfg);
++
++/**
++ * dpsw_if_enable() - Enable Interface
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPSW object
++ * @if_id: Interface Identifier
++ *
++ * Return: Completion status. '0' on Success; Error code otherwise.
++ */
++int dpsw_if_enable(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint16_t if_id);
++
++/**
++ * dpsw_if_disable() - Disable Interface
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPSW object
++ * @if_id: Interface Identifier
++ *
++ * Return: Completion status. '0' on Success; Error code otherwise.
++ */
++int dpsw_if_disable(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint16_t if_id);
++
++/**
++ * struct dpsw_if_attr - Structure representing DPSW interface attributes
++ * @num_tcs: Number of traffic classes
++ * @rate: Transmit rate in bits per second
++ * @options: Interface configuration options (bitmap)
++ * @enabled: Indicates if interface is enabled
++ * @accept_all_vlan: The device discards/accepts incoming frames
++ * for VLANs that do not include this interface
++ * @admit_untagged: When set to 'DPSW_ADMIT_ONLY_VLAN_TAGGED', the device
++ * discards untagged frames or priority-tagged frames received on
++ * this interface;
++ * When set to 'DPSW_ADMIT_ALL', untagged frames or priority-
++ * tagged frames received on this interface are accepted
++ * @qdid: control frames transmit qdid
++ */
++struct dpsw_if_attr {
++ uint8_t num_tcs;
++ uint32_t rate;
++ uint32_t options;
++ int enabled;
++ int accept_all_vlan;
++ enum dpsw_accepted_frames admit_untagged;
++ uint16_t qdid;
++};
++
++/**
++ * dpsw_if_get_attributes() - Function obtains attributes of interface
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPSW object
++ * @if_id: Interface Identifier
++ * @attr: Returned interface attributes
++ *
++ * Return: Completion status. '0' on Success; Error code otherwise.
++ */
++int dpsw_if_get_attributes(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint16_t if_id,
++ struct dpsw_if_attr *attr);
++
++/**
++ * dpsw_if_set_max_frame_length() - Set Maximum Receive frame length.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPSW object
++ * @if_id: Interface Identifier
++ * @frame_length: Maximum Frame Length
++ *
++ * Return: Completion status. '0' on Success; Error code otherwise.
++ */
++int dpsw_if_set_max_frame_length(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint16_t if_id,
++ uint16_t frame_length);
++
++/**
++ * dpsw_if_get_max_frame_length() - Get Maximum Receive frame length.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPSW object
++ * @if_id: Interface Identifier
++ * @frame_length: Returned maximum Frame Length
++ *
++ * Return: Completion status. '0' on Success; Error code otherwise.
++ */
++int dpsw_if_get_max_frame_length(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint16_t if_id,
++ uint16_t *frame_length);
++
++/**
++ * struct dpsw_vlan_cfg - VLAN Configuration
++ * @fdb_id: Forwarding Data Base
++ */
++struct dpsw_vlan_cfg {
++ uint16_t fdb_id;
++};
++
++/**
++ * dpsw_vlan_add() - Adding new VLAN to DPSW.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPSW object
++ * @vlan_id: VLAN Identifier
++ * @cfg: VLAN configuration
++ *
++ * Only VLAN ID and FDB ID are required parameters here.
++ * 12 bit VLAN ID is defined in IEEE802.1Q.
++ * Adding a duplicate VLAN ID is not allowed.
++ * FDB ID can be shared across multiple VLANs. Shared learning
++ * is obtained by calling dpsw_vlan_add for multiple VLAN IDs
++ * with the same fdb_id
++ *
++ * Return: Completion status. '0' on Success; Error code otherwise.
++ */
++int dpsw_vlan_add(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint16_t vlan_id,
++ const struct dpsw_vlan_cfg *cfg);
++
++/**
++ * struct dpsw_vlan_if_cfg - Set of VLAN Interfaces
++ * @num_ifs: The number of interfaces that are assigned to the egress
++ * list for this VLAN
++ * @if_id: The set of interfaces that are
++ * assigned to the egress list for this VLAN
++ */
++struct dpsw_vlan_if_cfg {
++ uint16_t num_ifs;
++ uint16_t if_id[DPSW_MAX_IF];
++};
++
++/**
++ * dpsw_vlan_add_if() - Adding a set of interfaces to an existing VLAN.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPSW object
++ * @vlan_id: VLAN Identifier
++ * @cfg: Set of interfaces to add
++ *
++ * It adds only interfaces that do not yet belong to this VLAN;
++ * otherwise an error is generated and the entire command is
++ * ignored. This function can be called numerous times, each time
++ * providing only the delta of interfaces to add.
++ *
++ * Return: Completion status. '0' on Success; Error code otherwise.
++ */
++int dpsw_vlan_add_if(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint16_t vlan_id,
++ const struct dpsw_vlan_if_cfg *cfg);
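Creating a VLAN and populating its egress list are two separate MC commands. A minimal sketch chaining them, assuming fdb_id comes from dpsw_fdb_add() further below and that interfaces 0 and 1 exist; the helper name, the VLAN id 100 and the cmd_flags value of 0 are illustrative:

	static int dpsw_vlan_setup_example(struct fsl_mc_io *mc_io, uint16_t token,
					   uint16_t fdb_id)
	{
		const struct dpsw_vlan_cfg vcfg = { .fdb_id = fdb_id };
		struct dpsw_vlan_if_cfg ifs = { .num_ifs = 2, .if_id = { 0, 1 } };
		int err;

		err = dpsw_vlan_add(mc_io, 0, token, 100, &vcfg);
		if (err)
			return err;

		/* Attach interfaces 0 and 1 to VLAN 100's egress list. */
		return dpsw_vlan_add_if(mc_io, 0, token, 100, &ifs);
	}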
++
++/**
++ * dpsw_vlan_add_if_untagged() - Defining a set of interfaces that should be
++ * transmitted as untagged.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPSW object
++ * @vlan_id: VLAN Identifier
++ * @cfg: set of interfaces that should be transmitted as untagged
++ *
++ * These interfaces should already belong to this VLAN.
++ * By default all interfaces are transmitted as tagged.
++ * Providing a non-existent interface, or an interface that is
++ * already configured as untagged, generates an error and the
++ * entire command is ignored.
++ *
++ * Return: Completion status. '0' on Success; Error code otherwise.
++ */
++int dpsw_vlan_add_if_untagged(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint16_t vlan_id,
++ const struct dpsw_vlan_if_cfg *cfg);
++
++/**
++ * dpsw_vlan_add_if_flooding() - Define a set of interfaces that should be
++ * included in flooding when frame with unknown destination
++ * unicast MAC arrived.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPSW object
++ * @vlan_id: VLAN Identifier
++ * @cfg: Set of interfaces that should be used for flooding
++ *
++ * These interfaces should belong to this VLAN. By default all
++ * interfaces are included in the flooding list. Providing a
++ * non-existent interface, or an interface that is already in the
++ * flooding list, generates an error and the entire command is
++ * ignored.
++ *
++ * Return: Completion status. '0' on Success; Error code otherwise.
++ */
++int dpsw_vlan_add_if_flooding(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint16_t vlan_id,
++ const struct dpsw_vlan_if_cfg *cfg);
++
++/**
++ * dpsw_vlan_remove_if() - Remove interfaces from an existing VLAN.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPSW object
++ * @vlan_id: VLAN Identifier
++ * @cfg: Set of interfaces that should be removed
++ *
++ * Interfaces must belong to this VLAN, otherwise an error
++ * is returned and the command is ignored
++ *
++ * Return: Completion status. '0' on Success; Error code otherwise.
++ */
++int dpsw_vlan_remove_if(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint16_t vlan_id,
++ const struct dpsw_vlan_if_cfg *cfg);
++
++/**
++ * dpsw_vlan_remove_if_untagged() - Define a set of interfaces that should be
++ * converted from being transmitted as untagged to being transmitted as tagged.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPSW object
++ * @vlan_id: VLAN Identifier
++ * @cfg: set of interfaces that should be removed
++ *
++ * Interfaces provided to this API have to belong to this VLAN and
++ * be configured as untagged, otherwise an error is returned and the
++ * command is ignored
++ *
++ * Return: Completion status. '0' on Success; Error code otherwise.
++ */
++int dpsw_vlan_remove_if_untagged(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint16_t vlan_id,
++ const struct dpsw_vlan_if_cfg *cfg);
++
++/**
++ * dpsw_vlan_remove_if_flooding() - Define a set of interfaces that should be
++ * removed from the flooding list.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPSW object
++ * @vlan_id: VLAN Identifier
++ * @cfg: set of interfaces used for flooding
++ *
++ * Return: Completion status. '0' on Success; Error code otherwise.
++ */
++int dpsw_vlan_remove_if_flooding(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint16_t vlan_id,
++ const struct dpsw_vlan_if_cfg *cfg);
++
++/**
++ * dpsw_vlan_remove() - Remove an entire VLAN
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPSW object
++ * @vlan_id: VLAN Identifier
++ *
++ * Return: Completion status. '0' on Success; Error code otherwise.
++ */
++int dpsw_vlan_remove(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint16_t vlan_id);
++
++/**
++ * struct dpsw_vlan_attr - VLAN attributes
++ * @fdb_id: Associated FDB ID
++ * @num_ifs: Number of interfaces
++ * @num_untagged_ifs: Number of untagged interfaces
++ * @num_flooding_ifs: Number of flooding interfaces
++ */
++struct dpsw_vlan_attr {
++ uint16_t fdb_id;
++ uint16_t num_ifs;
++ uint16_t num_untagged_ifs;
++ uint16_t num_flooding_ifs;
++};
++
++/**
++ * dpsw_vlan_get_attributes() - Get VLAN attributes
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPSW object
++ * @vlan_id: VLAN Identifier
++ * @attr: Returned DPSW attributes
++ *
++ * Return: Completion status. '0' on Success; Error code otherwise.
++ */
++int dpsw_vlan_get_attributes(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint16_t vlan_id,
++ struct dpsw_vlan_attr *attr);
++
++/**
++ * dpsw_vlan_get_if() - Get interfaces belonging to this VLAN
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPSW object
++ * @vlan_id: VLAN Identifier
++ * @cfg: Returned set of interfaces belonging to this VLAN
++ *
++ * Return: Completion status. '0' on Success; Error code otherwise.
++ */
++int dpsw_vlan_get_if(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint16_t vlan_id,
++ struct dpsw_vlan_if_cfg *cfg);
++
++/**
++ * dpsw_vlan_get_if_flooding() - Get interfaces used in flooding for this VLAN
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPSW object
++ * @vlan_id: VLAN Identifier
++ * @cfg: Returned set of flooding interfaces
++ *
++ * Return: Completion status. '0' on Success; Error code otherwise.
++ */
++int dpsw_vlan_get_if_flooding(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint16_t vlan_id,
++ struct dpsw_vlan_if_cfg *cfg);
++
++/**
++ * dpsw_vlan_get_if_untagged() - Get interfaces that should be transmitted as
++ * untagged
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPSW object
++ * @vlan_id: VLAN Identifier
++ * @cfg: Returned set of untagged interfaces
++ *
++ * Return: Completion status. '0' on Success; Error code otherwise.
++ */
++int dpsw_vlan_get_if_untagged(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint16_t vlan_id,
++ struct dpsw_vlan_if_cfg *cfg);
++
++/**
++ * struct dpsw_fdb_cfg - FDB Configuration
++ * @num_fdb_entries: Number of FDB entries
++ * @fdb_aging_time: Aging time in seconds
++ */
++struct dpsw_fdb_cfg {
++ uint16_t num_fdb_entries;
++ uint16_t fdb_aging_time;
++};
++
++/**
++ * dpsw_fdb_add() - Add an FDB to the switch and return a handle to the FDB
++ * table for future reference
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPSW object
++ * @fdb_id: Returned Forwarding Database Identifier
++ * @cfg: FDB Configuration
++ *
++ * Return: Completion status. '0' on Success; Error code otherwise.
++ */
++int dpsw_fdb_add(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint16_t *fdb_id,
++ const struct dpsw_fdb_cfg *cfg);
++
++/**
++ * dpsw_fdb_remove() - Remove FDB from switch
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPSW object
++ * @fdb_id: Forwarding Database Identifier
++ *
++ * Return: Completion status. '0' on Success; Error code otherwise.
++ */
++int dpsw_fdb_remove(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint16_t fdb_id);
++
++/**
++ * enum dpsw_fdb_entry_type - FDB Entry type - Static/Dynamic
++ * @DPSW_FDB_ENTRY_STATIC: Static entry
++ * @DPSW_FDB_ENTRY_DINAMIC: Dynamic entry
++ */
++enum dpsw_fdb_entry_type {
++ DPSW_FDB_ENTRY_STATIC = 0,
++ DPSW_FDB_ENTRY_DINAMIC = 1
++};
++
++/**
++ * struct dpsw_fdb_unicast_cfg - Unicast entry configuration
++ * @type: Select static or dynamic entry
++ * @mac_addr: MAC address
++ * @if_egress: Egress interface ID
++ */
++struct dpsw_fdb_unicast_cfg {
++ enum dpsw_fdb_entry_type type;
++ uint8_t mac_addr[6];
++ uint16_t if_egress;
++};
++
++/**
++ * dpsw_fdb_add_unicast() - Add a unicast entry into the MAC lookup table
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPSW object
++ * @fdb_id: Forwarding Database Identifier
++ * @cfg: Unicast entry configuration
++ *
++ * Return: Completion status. '0' on Success; Error code otherwise.
++ */
++int dpsw_fdb_add_unicast(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint16_t fdb_id,
++ const struct dpsw_fdb_unicast_cfg *cfg);
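A typical use of the unicast entry above is pinning a known station to one egress port. A minimal sketch; the helper name and the cmd_flags value of 0 are assumptions, and <string.h> is needed for memcpy():

	#include <string.h>

	static int dpsw_fdb_pin_station(struct fsl_mc_io *mc_io, uint16_t token,
					uint16_t fdb_id, const uint8_t mac[6],
					uint16_t if_egress)
	{
		struct dpsw_fdb_unicast_cfg cfg = {
			.type = DPSW_FDB_ENTRY_STATIC,
			.if_egress = if_egress,
		};

		memcpy(cfg.mac_addr, mac, 6);
		return dpsw_fdb_add_unicast(mc_io, 0, token, fdb_id, &cfg);
	}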
++
++/**
++ * dpsw_fdb_get_unicast() - Get unicast entry from MAC lookup table by
++ * unicast Ethernet address
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPSW object
++ * @fdb_id: Forwarding Database Identifier
++ * @cfg: Returned unicast entry configuration
++ *
++ * Return: Completion status. '0' on Success; Error code otherwise.
++ */
++int dpsw_fdb_get_unicast(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint16_t fdb_id,
++ struct dpsw_fdb_unicast_cfg *cfg);
++
++/**
++ * dpsw_fdb_remove_unicast() - removes an entry from MAC lookup table
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPSW object
++ * @fdb_id: Forwarding Database Identifier
++ * @cfg: Unicast entry configuration
++ *
++ * Return: Completion status. '0' on Success; Error code otherwise.
++ */
++int dpsw_fdb_remove_unicast(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint16_t fdb_id,
++ const struct dpsw_fdb_unicast_cfg *cfg);
++
++/**
++ * struct dpsw_fdb_multicast_cfg - Multi-cast entry configuration
++ * @type: Select static or dynamic entry
++ * @mac_addr: MAC address
++ * @num_ifs: Number of external and internal interfaces
++ * @if_id: Egress interface IDs
++ */
++struct dpsw_fdb_multicast_cfg {
++ enum dpsw_fdb_entry_type type;
++ uint8_t mac_addr[6];
++ uint16_t num_ifs;
++ uint16_t if_id[DPSW_MAX_IF];
++};
++
++/**
++ * dpsw_fdb_add_multicast() - Add a set of egress interfaces to multi-cast group
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPSW object
++ * @fdb_id: Forwarding Database Identifier
++ * @cfg: Multicast entry configuration
++ *
++ * If the group doesn't exist, it will be created.
++ * It adds only interfaces that do not yet belong to this multicast
++ * group; otherwise an error is generated and the command is
++ * ignored.
++ * This function may be called numerous times, each time providing
++ * only the delta of interfaces to add.
++ *
++ * Return: Completion status. '0' on Success; Error code otherwise.
++ */
++int dpsw_fdb_add_multicast(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint16_t fdb_id,
++ const struct dpsw_fdb_multicast_cfg *cfg);
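Since the group is created on first use, a single call is enough to build a static multicast group. A minimal sketch that floods a group MAC to the first num_ifs ports; the helper name, the port numbering and the cmd_flags value of 0 are assumptions:

	#include <string.h>

	static int dpsw_fdb_add_mc_group(struct fsl_mc_io *mc_io, uint16_t token,
					 uint16_t fdb_id, const uint8_t mac[6],
					 uint16_t num_ifs)
	{
		struct dpsw_fdb_multicast_cfg cfg = {
			.type = DPSW_FDB_ENTRY_STATIC,
			.num_ifs = num_ifs,
		};
		uint16_t i;

		memcpy(cfg.mac_addr, mac, 6);
		for (i = 0; i < num_ifs && i < DPSW_MAX_IF; i++)
			cfg.if_id[i] = i;	/* egress interfaces 0..num_ifs-1 */

		return dpsw_fdb_add_multicast(mc_io, 0, token, fdb_id, &cfg);
	}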
++
++/**
++ * dpsw_fdb_get_multicast() - Reading multi-cast group by multi-cast Ethernet
++ * address.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPSW object
++ * @fdb_id: Forwarding Database Identifier
++ * @cfg: Returned multicast entry configuration
++ *
++ * Return: Completion status. '0' on Success; Error code otherwise.
++ */
++int dpsw_fdb_get_multicast(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint16_t fdb_id,
++ struct dpsw_fdb_multicast_cfg *cfg);
++
++/**
++ * dpsw_fdb_remove_multicast() - Removing interfaces from an existing multicast
++ * group.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPSW object
++ * @fdb_id: Forwarding Database Identifier
++ * @cfg: Multicast entry configuration
++ *
++ * Interfaces provided to this API have to exist in the group,
++ * otherwise an error is returned and the entire command is
++ * ignored. If no interface is left in the group,
++ * the entire group is deleted
++ *
++ * Return: Completion status. '0' on Success; Error code otherwise.
++ */
++int dpsw_fdb_remove_multicast(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint16_t fdb_id,
++ const struct dpsw_fdb_multicast_cfg *cfg);
++
++/**
++ * enum dpsw_fdb_learning_mode - Auto-learning modes
++ * @DPSW_FDB_LEARNING_MODE_DIS: Disable Auto-learning
++ * @DPSW_FDB_LEARNING_MODE_HW: Enable HW auto-Learning
++ * @DPSW_FDB_LEARNING_MODE_NON_SECURE: Enable non-secure learning by CPU
++ * @DPSW_FDB_LEARNING_MODE_SECURE: Enable secure learning by CPU
++ *
++ * NON-SECURE LEARNING
++ * SMAC found DMAC found CTLU Action
++ * v v Forward frame to
++ * 1. DMAC destination
++ * - v Forward frame to
++ * 1. DMAC destination
++ * 2. Control interface
++ * v - Forward frame to
++ * 1. Flooding list of interfaces
++ * - - Forward frame to
++ * 1. Flooding list of interfaces
++ * 2. Control interface
++ * SECURE LEARNING
++ * SMAC found DMAC found CTLU Action
++ * v v Forward frame to
++ * 1. DMAC destination
++ * - v Forward frame to
++ * 1. Control interface
++ * v - Forward frame to
++ * 1. Flooding list of interfaces
++ * - - Forward frame to
++ * 1. Control interface
++ */
++enum dpsw_fdb_learning_mode {
++ DPSW_FDB_LEARNING_MODE_DIS = 0,
++ DPSW_FDB_LEARNING_MODE_HW = 1,
++ DPSW_FDB_LEARNING_MODE_NON_SECURE = 2,
++ DPSW_FDB_LEARNING_MODE_SECURE = 3
++};
++
++/**
++ * dpsw_fdb_set_learning_mode() - Define FDB learning mode
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPSW object
++ * @fdb_id: Forwarding Database Identifier
++ * @mode: learning mode
++ *
++ * Return: Completion status. '0' on Success; Error code otherwise.
++ */
++int dpsw_fdb_set_learning_mode(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint16_t fdb_id,
++ enum dpsw_fdb_learning_mode mode);
++
++/**
++ * struct dpsw_fdb_attr - FDB Attributes
++ * @max_fdb_entries: Number of FDB entries
++ * @fdb_aging_time: Aging time in seconds
++ * @learning_mode: Learning mode
++ * @num_fdb_mc_groups: Current number of multicast groups
++ * @max_fdb_mc_groups: Maximum number of multicast groups
++ */
++struct dpsw_fdb_attr {
++ uint16_t max_fdb_entries;
++ uint16_t fdb_aging_time;
++ enum dpsw_fdb_learning_mode learning_mode;
++ uint16_t num_fdb_mc_groups;
++ uint16_t max_fdb_mc_groups;
++};
++
++/**
++ * dpsw_fdb_get_attributes() - Get FDB attributes
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPSW object
++ * @fdb_id: Forwarding Database Identifier
++ * @attr: Returned FDB attributes
++ *
++ * Return: Completion status. '0' on Success; Error code otherwise.
++ */
++int dpsw_fdb_get_attributes(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint16_t fdb_id,
++ struct dpsw_fdb_attr *attr);
++
++/**
++ * struct dpsw_acl_cfg - ACL Configuration
++ * @max_entries: Maximum number of ACL entries
++ */
++struct dpsw_acl_cfg {
++ uint16_t max_entries;
++};
++
++/**
++ * struct dpsw_acl_fields - ACL fields.
++ * @l2_dest_mac: Destination MAC address: BPDU, Multicast, Broadcast, Unicast,
++ * slow protocols, MVRP, STP
++ * @l2_source_mac: Source MAC address
++ * @l2_tpid: Layer 2 (Ethernet) protocol type, used to identify the following
++ * protocols: MPLS, PTP, PFC, ARP, Jumbo frames, LLDP, IEEE802.1ae,
++ * Q-in-Q, IPv4, IPv6, PPPoE
++ * @l2_pcp_dei: Layer 2 PCP and DEI fields from the VLAN tag
++ * @l2_vlan_id: layer 2 VLAN ID
++ * @l2_ether_type: layer 2 Ethernet type
++ * @l3_dscp: Layer 3 differentiated services code point
++ * @l3_protocol: Tells the network layer at the destination host which
++ * protocol this packet belongs to. The following protocols are
++ * supported: ICMP, IGMP, IPv4 (encapsulation), TCP, IPv6
++ * (encapsulation), GRE, PTP
++ * @l3_source_ip: Source IPv4 IP
++ * @l3_dest_ip: Destination IPv4 IP
++ * @l4_source_port: Source TCP/UDP Port
++ * @l4_dest_port: Destination TCP/UDP Port
++ */
++struct dpsw_acl_fields {
++ uint8_t l2_dest_mac[6];
++ uint8_t l2_source_mac[6];
++ uint16_t l2_tpid;
++ uint8_t l2_pcp_dei;
++ uint16_t l2_vlan_id;
++ uint16_t l2_ether_type;
++ uint8_t l3_dscp;
++ uint8_t l3_protocol;
++ uint32_t l3_source_ip;
++ uint32_t l3_dest_ip;
++ uint16_t l4_source_port;
++ uint16_t l4_dest_port;
++};
++
++/**
++ * struct dpsw_acl_key - ACL key
++ * @match: Match fields
++ * @mask: Mask: b'1 - valid, b'0 don't care
++ */
++struct dpsw_acl_key {
++ struct dpsw_acl_fields match;
++ struct dpsw_acl_fields mask;
++};
++
++/**
++ * enum dpsw_acl_action
++ * @DPSW_ACL_ACTION_DROP: Drop frame
++ * @DPSW_ACL_ACTION_REDIRECT: Redirect to certain port
++ * @DPSW_ACL_ACTION_ACCEPT: Accept frame
++ * @DPSW_ACL_ACTION_REDIRECT_TO_CTRL_IF: Redirect to control interface
++ */
++enum dpsw_acl_action {
++ DPSW_ACL_ACTION_DROP,
++ DPSW_ACL_ACTION_REDIRECT,
++ DPSW_ACL_ACTION_ACCEPT,
++ DPSW_ACL_ACTION_REDIRECT_TO_CTRL_IF
++};
++
++/**
++ * struct dpsw_acl_result - ACL action
++ * @action: Action should be taken when ACL entry hit
++ * @if_id: Interface IDs to redirect frame. Valid only if redirect selected for
++ * action
++ */
++struct dpsw_acl_result {
++ enum dpsw_acl_action action;
++ uint16_t if_id;
++};
++
++/**
++ * struct dpsw_acl_entry_cfg - ACL entry
++ * @key_iova: I/O virtual address of DMA-able memory filled with key after call
++ * to dpsw_acl_prepare_entry_cfg()
++ * @result: Required action when entry hit occurs
++ * @precedence: Precedence inside the ACL; 0 is lowest. This priority cannot
++ * change during the lifetime of a policy. It is the user's
++ * responsibility to space the priorities to allow for subsequent
++ * rule additions.
++ */
++struct dpsw_acl_entry_cfg {
++ uint64_t key_iova;
++ struct dpsw_acl_result result;
++ int precedence;
++};
++
++/**
++ * dpsw_acl_add() - Adds ACL to L2 switch.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPSW object
++ * @acl_id: Returned ACL ID, for the future reference
++ * @cfg: ACL configuration
++ *
++ * Create Access Control List. Multiple ACLs can be created and
++ * co-exist in L2 switch
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpsw_acl_add(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint16_t *acl_id,
++ const struct dpsw_acl_cfg *cfg);
++
++/**
++ * dpsw_acl_remove() - Removes ACL from L2 switch.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPSW object
++ * @acl_id: ACL ID
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpsw_acl_remove(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint16_t acl_id);
++
++/**
++ * dpsw_acl_prepare_entry_cfg() - Prepare an ACL entry key for setting into the ACL.
++ * @key: key
++ * @entry_cfg_buf: Zeroed 256 bytes of memory before mapping it to DMA
++ *
++ * This function has to be called before adding or removing an ACL entry
++ *
++ */
++void dpsw_acl_prepare_entry_cfg(const struct dpsw_acl_key *key,
++ uint8_t *entry_cfg_buf);
++
++/**
++ * dpsw_acl_add_entry() - Adds an entry to ACL.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPSW object
++ * @acl_id: ACL ID
++ * @cfg: entry configuration
++ *
++ * warning: This function has to be called after dpsw_acl_prepare_entry_cfg()
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpsw_acl_add_entry(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint16_t acl_id,
++ const struct dpsw_acl_entry_cfg *cfg);
++
++/**
++ * dpsw_acl_remove_entry() - Removes an entry from ACL.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPSW object
++ * @acl_id: ACL ID
++ * @cfg: entry configuration
++ *
++ * warning: This function has to be called after dpsw_acl_prepare_entry_cfg()
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpsw_acl_remove_entry(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint16_t acl_id,
++ const struct dpsw_acl_entry_cfg *cfg);
++
++/**
++ * struct dpsw_acl_if_cfg - List of interfaces to Associate with ACL
++ * @num_ifs: Number of interfaces
++ * @if_id: List of interfaces
++ */
++struct dpsw_acl_if_cfg {
++ uint16_t num_ifs;
++ uint16_t if_id[DPSW_MAX_IF];
++};
++
++/**
++ * dpsw_acl_add_if() - Associate interface/interfaces with ACL.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPSW object
++ * @acl_id: ACL ID
++ * @cfg: interfaces list
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpsw_acl_add_if(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint16_t acl_id,
++ const struct dpsw_acl_if_cfg *cfg);
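Putting the ACL calls together: create the table, serialize a key into a caller-supplied DMA buffer, add a drop rule for one source MAC, and bind the ACL to interface 0. A minimal sketch; the helper name, the table size, the interface number and the cmd_flags value of 0 are assumptions, and obtaining the 256-byte DMA-able key buffer and its IOVA is platform specific and not shown:

	#include <string.h>

	static int dpsw_acl_drop_smac_example(struct fsl_mc_io *mc_io, uint16_t token,
					      const uint8_t smac[6],
					      uint8_t *key_buf, uint64_t key_iova)
	{
		struct dpsw_acl_cfg acl_cfg = { .max_entries = 16 };
		struct dpsw_acl_entry_cfg entry = {
			.key_iova = key_iova,
			.result = { .action = DPSW_ACL_ACTION_DROP },
			.precedence = 0,
		};
		struct dpsw_acl_if_cfg ifs = { .num_ifs = 1, .if_id = { 0 } };
		struct dpsw_acl_key key;
		uint16_t acl_id;
		int err;

		memset(&key, 0, sizeof(key));
		memcpy(key.match.l2_source_mac, smac, 6);
		memset(key.mask.l2_source_mac, 0xff, 6);	/* match only the source MAC */

		err = dpsw_acl_add(mc_io, 0, token, &acl_id, &acl_cfg);
		if (err)
			return err;

		dpsw_acl_prepare_entry_cfg(&key, key_buf);
		err = dpsw_acl_add_entry(mc_io, 0, token, acl_id, &entry);
		if (err)
			return err;

		return dpsw_acl_add_if(mc_io, 0, token, acl_id, &ifs);
	}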
++
++/**
++ * dpsw_acl_remove_if() - De-associate interface/interfaces from ACL.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPSW object
++ * @acl_id: ACL ID
++ * @cfg: interfaces list
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpsw_acl_remove_if(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint16_t acl_id,
++ const struct dpsw_acl_if_cfg *cfg);
++
++/**
++ * struct dpsw_acl_attr - ACL Attributes
++ * @max_entries: Max number of ACL entries
++ * @num_entries: Number of used ACL entries
++ * @num_ifs: Number of interfaces associated with ACL
++ */
++struct dpsw_acl_attr {
++ uint16_t max_entries;
++ uint16_t num_entries;
++ uint16_t num_ifs;
++};
++
++/**
++* dpsw_acl_get_attributes() - Get ACL attributes
++* @mc_io: Pointer to MC portal's I/O object
++* @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++* @token: Token of DPSW object
++* @acl_id: ACL Identifier
++* @attr: Returned ACL attributes
++*
++* Return: '0' on Success; Error code otherwise.
++*/
++int dpsw_acl_get_attributes(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ uint16_t acl_id,
++ struct dpsw_acl_attr *attr);
++/**
++* struct dpsw_ctrl_if_attr - Control interface attributes
++* @rx_fqid: Receive FQID
++* @rx_err_fqid: Receive error FQID
++* @tx_err_conf_fqid: Transmit error and confirmation FQID
++*/
++struct dpsw_ctrl_if_attr {
++ uint32_t rx_fqid;
++ uint32_t rx_err_fqid;
++ uint32_t tx_err_conf_fqid;
++};
++
++/**
++* dpsw_ctrl_if_get_attributes() - Obtain control interface attributes
++* @mc_io: Pointer to MC portal's I/O object
++* @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++* @token: Token of DPSW object
++* @attr: Returned control interface attributes
++*
++* Return: '0' on Success; Error code otherwise.
++*/
++int dpsw_ctrl_if_get_attributes(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ struct dpsw_ctrl_if_attr *attr);
++
++/**
++ * Maximum number of DPBP
++ */
++#define DPSW_MAX_DPBP 8
++
++/**
++ * struct dpsw_ctrl_if_pools_cfg - Control interface buffer pools configuration
++ * @num_dpbp: Number of DPBPs
++ * @pools: Array of buffer pools parameters; The number of valid entries
++ * must match 'num_dpbp' value
++ */
++struct dpsw_ctrl_if_pools_cfg {
++ uint8_t num_dpbp;
++ /**
++ * struct pools - Buffer pools parameters
++ * @dpbp_id: DPBP object ID
++ * @buffer_size: Buffer size
++ * @backup_pool: Backup pool
++ */
++ struct {
++ int dpbp_id;
++ uint16_t buffer_size;
++ int backup_pool;
++ } pools[DPSW_MAX_DPBP];
++};
++
++/**
++* dpsw_ctrl_if_set_pools() - Set control interface buffer pools
++* @mc_io: Pointer to MC portal's I/O object
++* @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++* @token: Token of DPSW object
++* @cfg: buffer pools configuration
++*
++* Return: '0' on Success; Error code otherwise.
++*/
++int dpsw_ctrl_if_set_pools(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ const struct dpsw_ctrl_if_pools_cfg *cfg);
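In the simplest case the control interface needs only one pool. A minimal sketch that registers a single DPBP and then enables the interface via dpsw_ctrl_if_enable() declared just below, assuming dpbp_id and buffer_size come from DPBP setup elsewhere; the helper name and the cmd_flags value of 0 are illustrative:

	static int dpsw_ctrl_if_setup_example(struct fsl_mc_io *mc_io, uint16_t token,
					      int dpbp_id, uint16_t buffer_size)
	{
		struct dpsw_ctrl_if_pools_cfg pools = {
			.num_dpbp = 1,
			.pools = { { .dpbp_id = dpbp_id,
				     .buffer_size = buffer_size,
				     .backup_pool = 0 } },
		};
		int err;

		err = dpsw_ctrl_if_set_pools(mc_io, 0, token, &pools);
		if (err)
			return err;

		return dpsw_ctrl_if_enable(mc_io, 0, token);
	}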
++
++/**
++* dpsw_ctrl_if_enable() - Enable control interface
++* @mc_io: Pointer to MC portal's I/O object
++* @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++* @token: Token of DPSW object
++*
++* Return: '0' on Success; Error code otherwise.
++*/
++int dpsw_ctrl_if_enable(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token);
++
++/**
++* dpsw_ctrl_if_disable() - Disable the control interface
++* @mc_io: Pointer to MC portal's I/O object
++* @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++* @token: Token of DPSW object
++*
++* Return: '0' on Success; Error code otherwise.
++*/
++int dpsw_ctrl_if_disable(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token);
++
++#endif /* __FSL_DPSW_H */
+diff --git a/drivers/net/dpaa2/mc/fsl_dpsw_cmd.h b/drivers/net/dpaa2/mc/fsl_dpsw_cmd.h
+new file mode 100644
+index 0000000..c65fe38
+--- /dev/null
++++ b/drivers/net/dpaa2/mc/fsl_dpsw_cmd.h
+@@ -0,0 +1,916 @@
++/* Copyright 2013-2015 Freescale Semiconductor Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of the above-listed copyright holders nor the
++ * names of any contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
++ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
++ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
++ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
++ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
++ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
++ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
++ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
++ * POSSIBILITY OF SUCH DAMAGE.
++ */
++#ifndef __FSL_DPSW_CMD_H
++#define __FSL_DPSW_CMD_H
++
++/* DPSW Version */
++#define DPSW_VER_MAJOR 7
++#define DPSW_VER_MINOR 0
++
++/* Command IDs */
++#define DPSW_CMDID_CLOSE 0x800
++#define DPSW_CMDID_OPEN 0x802
++#define DPSW_CMDID_CREATE 0x902
++#define DPSW_CMDID_DESTROY 0x900
++
++#define DPSW_CMDID_ENABLE 0x002
++#define DPSW_CMDID_DISABLE 0x003
++#define DPSW_CMDID_GET_ATTR 0x004
++#define DPSW_CMDID_RESET 0x005
++#define DPSW_CMDID_IS_ENABLED 0x006
++
++#define DPSW_CMDID_SET_IRQ 0x010
++#define DPSW_CMDID_GET_IRQ 0x011
++#define DPSW_CMDID_SET_IRQ_ENABLE 0x012
++#define DPSW_CMDID_GET_IRQ_ENABLE 0x013
++#define DPSW_CMDID_SET_IRQ_MASK 0x014
++#define DPSW_CMDID_GET_IRQ_MASK 0x015
++#define DPSW_CMDID_GET_IRQ_STATUS 0x016
++#define DPSW_CMDID_CLEAR_IRQ_STATUS 0x017
++
++#define DPSW_CMDID_SET_REFLECTION_IF 0x022
++
++#define DPSW_CMDID_ADD_CUSTOM_TPID 0x024
++
++#define DPSW_CMDID_REMOVE_CUSTOM_TPID 0x026
++
++#define DPSW_CMDID_IF_SET_TCI 0x030
++#define DPSW_CMDID_IF_SET_STP 0x031
++#define DPSW_CMDID_IF_SET_ACCEPTED_FRAMES 0x032
++#define DPSW_CMDID_SET_IF_ACCEPT_ALL_VLAN 0x033
++#define DPSW_CMDID_IF_GET_COUNTER 0x034
++#define DPSW_CMDID_IF_SET_COUNTER 0x035
++#define DPSW_CMDID_IF_SET_TX_SELECTION 0x036
++#define DPSW_CMDID_IF_ADD_REFLECTION 0x037
++#define DPSW_CMDID_IF_REMOVE_REFLECTION 0x038
++#define DPSW_CMDID_IF_SET_FLOODING_METERING 0x039
++#define DPSW_CMDID_IF_SET_METERING 0x03A
++#define DPSW_CMDID_IF_SET_EARLY_DROP 0x03B
++
++#define DPSW_CMDID_IF_ENABLE 0x03D
++#define DPSW_CMDID_IF_DISABLE 0x03E
++
++#define DPSW_CMDID_IF_GET_ATTR 0x042
++
++#define DPSW_CMDID_IF_SET_MAX_FRAME_LENGTH 0x044
++#define DPSW_CMDID_IF_GET_MAX_FRAME_LENGTH 0x045
++#define DPSW_CMDID_IF_GET_LINK_STATE 0x046
++#define DPSW_CMDID_IF_SET_FLOODING 0x047
++#define DPSW_CMDID_IF_SET_BROADCAST 0x048
++#define DPSW_CMDID_IF_SET_MULTICAST 0x049
++#define DPSW_CMDID_IF_GET_TCI 0x04A
++
++#define DPSW_CMDID_IF_SET_LINK_CFG 0x04C
++
++#define DPSW_CMDID_VLAN_ADD 0x060
++#define DPSW_CMDID_VLAN_ADD_IF 0x061
++#define DPSW_CMDID_VLAN_ADD_IF_UNTAGGED 0x062
++#define DPSW_CMDID_VLAN_ADD_IF_FLOODING 0x063
++#define DPSW_CMDID_VLAN_REMOVE_IF 0x064
++#define DPSW_CMDID_VLAN_REMOVE_IF_UNTAGGED 0x065
++#define DPSW_CMDID_VLAN_REMOVE_IF_FLOODING 0x066
++#define DPSW_CMDID_VLAN_REMOVE 0x067
++#define DPSW_CMDID_VLAN_GET_IF 0x068
++#define DPSW_CMDID_VLAN_GET_IF_FLOODING 0x069
++#define DPSW_CMDID_VLAN_GET_IF_UNTAGGED 0x06A
++#define DPSW_CMDID_VLAN_GET_ATTRIBUTES 0x06B
++
++#define DPSW_CMDID_FDB_GET_MULTICAST 0x080
++#define DPSW_CMDID_FDB_GET_UNICAST 0x081
++#define DPSW_CMDID_FDB_ADD 0x082
++#define DPSW_CMDID_FDB_REMOVE 0x083
++#define DPSW_CMDID_FDB_ADD_UNICAST 0x084
++#define DPSW_CMDID_FDB_REMOVE_UNICAST 0x085
++#define DPSW_CMDID_FDB_ADD_MULTICAST 0x086
++#define DPSW_CMDID_FDB_REMOVE_MULTICAST 0x087
++#define DPSW_CMDID_FDB_SET_LEARNING_MODE 0x088
++#define DPSW_CMDID_FDB_GET_ATTR 0x089
++
++#define DPSW_CMDID_ACL_ADD 0x090
++#define DPSW_CMDID_ACL_REMOVE 0x091
++#define DPSW_CMDID_ACL_ADD_ENTRY 0x092
++#define DPSW_CMDID_ACL_REMOVE_ENTRY 0x093
++#define DPSW_CMDID_ACL_ADD_IF 0x094
++#define DPSW_CMDID_ACL_REMOVE_IF 0x095
++#define DPSW_CMDID_ACL_GET_ATTR 0x096
++
++#define DPSW_CMDID_CTRL_IF_GET_ATTR 0x0A0
++#define DPSW_CMDID_CTRL_IF_SET_POOLS 0x0A1
++#define DPSW_CMDID_CTRL_IF_ENABLE 0x0A2
++#define DPSW_CMDID_CTRL_IF_DISABLE 0x0A3
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPSW_CMD_OPEN(cmd, dpsw_id) \
++ MC_CMD_OP(cmd, 0, 0, 32, int, dpsw_id)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPSW_CMD_CREATE(cmd, cfg) \
++do { \
++ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, cfg->num_ifs);\
++ MC_CMD_OP(cmd, 0, 16, 8, uint8_t, cfg->adv.max_fdbs);\
++ MC_CMD_OP(cmd, 0, 24, 8, uint8_t, cfg->adv.max_meters_per_if);\
++ MC_CMD_OP(cmd, 0, 32, 4, enum dpsw_component_type, \
++ cfg->adv.component_type);\
++ MC_CMD_OP(cmd, 1, 0, 16, uint16_t, cfg->adv.max_vlans);\
++ MC_CMD_OP(cmd, 1, 16, 16, uint16_t, cfg->adv.max_fdb_entries);\
++ MC_CMD_OP(cmd, 1, 32, 16, uint16_t, cfg->adv.fdb_aging_time);\
++ MC_CMD_OP(cmd, 1, 48, 16, uint16_t, cfg->adv.max_fdb_mc_groups);\
++ MC_CMD_OP(cmd, 2, 0, 64, uint64_t, cfg->adv.options);\
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPSW_RSP_IS_ENABLED(cmd, en) \
++ MC_RSP_OP(cmd, 0, 0, 1, int, en)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPSW_CMD_SET_IRQ(cmd, irq_index, irq_cfg) \
++do { \
++ MC_CMD_OP(cmd, 0, 0, 8, uint8_t, irq_index);\
++ MC_CMD_OP(cmd, 0, 32, 32, uint32_t, irq_cfg->val);\
++ MC_CMD_OP(cmd, 1, 0, 64, uint64_t, irq_cfg->addr);\
++ MC_CMD_OP(cmd, 2, 0, 32, int, irq_cfg->irq_num); \
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPSW_CMD_GET_IRQ(cmd, irq_index) \
++ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPSW_RSP_GET_IRQ(cmd, type, irq_cfg) \
++do { \
++ MC_RSP_OP(cmd, 0, 0, 32, uint32_t, irq_cfg->val); \
++ MC_RSP_OP(cmd, 1, 0, 64, uint64_t, irq_cfg->addr);\
++ MC_RSP_OP(cmd, 2, 0, 32, int, irq_cfg->irq_num); \
++ MC_RSP_OP(cmd, 2, 32, 32, int, type); \
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPSW_CMD_SET_IRQ_ENABLE(cmd, irq_index, enable_state) \
++do { \
++ MC_CMD_OP(cmd, 0, 0, 8, uint8_t, enable_state); \
++ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPSW_CMD_GET_IRQ_ENABLE(cmd, irq_index) \
++ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPSW_RSP_GET_IRQ_ENABLE(cmd, enable_state) \
++ MC_RSP_OP(cmd, 0, 0, 8, uint8_t, enable_state)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPSW_CMD_SET_IRQ_MASK(cmd, irq_index, mask) \
++do { \
++ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, mask); \
++ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPSW_CMD_GET_IRQ_MASK(cmd, irq_index) \
++ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPSW_RSP_GET_IRQ_MASK(cmd, mask) \
++ MC_RSP_OP(cmd, 0, 0, 32, uint32_t, mask)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPSW_CMD_GET_IRQ_STATUS(cmd, irq_index, status) \
++do { \
++ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, status);\
++ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPSW_RSP_GET_IRQ_STATUS(cmd, status) \
++ MC_RSP_OP(cmd, 0, 0, 32, uint32_t, status)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPSW_CMD_CLEAR_IRQ_STATUS(cmd, irq_index, status) \
++do { \
++ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, status); \
++ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPSW_RSP_GET_ATTR(cmd, attr) \
++do { \
++ MC_RSP_OP(cmd, 0, 0, 16, uint16_t, attr->num_ifs);\
++ MC_RSP_OP(cmd, 0, 16, 8, uint8_t, attr->max_fdbs);\
++ MC_RSP_OP(cmd, 0, 24, 8, uint8_t, attr->num_fdbs);\
++ MC_RSP_OP(cmd, 0, 32, 16, uint16_t, attr->max_vlans);\
++ MC_RSP_OP(cmd, 0, 48, 16, uint16_t, attr->num_vlans);\
++ MC_RSP_OP(cmd, 1, 0, 16, uint16_t, attr->version.major);\
++ MC_RSP_OP(cmd, 1, 16, 16, uint16_t, attr->version.minor);\
++ MC_RSP_OP(cmd, 1, 32, 16, uint16_t, attr->max_fdb_entries);\
++ MC_RSP_OP(cmd, 1, 48, 16, uint16_t, attr->fdb_aging_time);\
++ MC_RSP_OP(cmd, 2, 0, 32, int, attr->id);\
++ MC_RSP_OP(cmd, 2, 32, 16, uint16_t, attr->mem_size);\
++ MC_RSP_OP(cmd, 2, 48, 16, uint16_t, attr->max_fdb_mc_groups);\
++ MC_RSP_OP(cmd, 3, 0, 64, uint64_t, attr->options);\
++ MC_RSP_OP(cmd, 4, 0, 8, uint8_t, attr->max_meters_per_if);\
++ MC_RSP_OP(cmd, 4, 8, 4, enum dpsw_component_type, \
++ attr->component_type);\
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPSW_CMD_SET_REFLECTION_IF(cmd, if_id) \
++ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, if_id)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPSW_CMD_IF_SET_FLOODING(cmd, if_id, en) \
++do { \
++ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, if_id);\
++ MC_CMD_OP(cmd, 0, 16, 1, int, en);\
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPSW_CMD_IF_SET_BROADCAST(cmd, if_id, en) \
++do { \
++ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, if_id);\
++ MC_CMD_OP(cmd, 0, 16, 1, int, en);\
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPSW_CMD_IF_SET_MULTICAST(cmd, if_id, en) \
++do { \
++ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, if_id);\
++ MC_CMD_OP(cmd, 0, 16, 1, int, en);\
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPSW_CMD_IF_SET_TCI(cmd, if_id, cfg) \
++do { \
++ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, if_id);\
++ MC_CMD_OP(cmd, 0, 16, 12, uint16_t, cfg->vlan_id);\
++ MC_CMD_OP(cmd, 0, 28, 1, uint8_t, cfg->dei);\
++ MC_CMD_OP(cmd, 0, 29, 3, uint8_t, cfg->pcp);\
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPSW_CMD_IF_GET_TCI(cmd, if_id) \
++ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, if_id)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPSW_RSP_IF_GET_TCI(cmd, cfg) \
++do { \
++ MC_RSP_OP(cmd, 0, 16, 16, uint16_t, cfg->vlan_id);\
++ MC_RSP_OP(cmd, 0, 32, 8, uint8_t, cfg->dei);\
++ MC_RSP_OP(cmd, 0, 40, 8, uint8_t, cfg->pcp);\
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPSW_CMD_IF_SET_STP(cmd, if_id, cfg) \
++do { \
++ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, if_id);\
++ MC_CMD_OP(cmd, 0, 16, 16, uint16_t, cfg->vlan_id);\
++ MC_CMD_OP(cmd, 0, 32, 4, enum dpsw_stp_state, cfg->state);\
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPSW_CMD_IF_SET_ACCEPTED_FRAMES(cmd, if_id, cfg) \
++do { \
++ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, if_id);\
++ MC_CMD_OP(cmd, 0, 16, 4, enum dpsw_accepted_frames, cfg->type);\
++ MC_CMD_OP(cmd, 0, 20, 4, enum dpsw_action, cfg->unaccept_act);\
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPSW_CMD_IF_SET_ACCEPT_ALL_VLAN(cmd, if_id, accept_all) \
++do { \
++ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, if_id);\
++ MC_CMD_OP(cmd, 0, 16, 1, int, accept_all);\
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPSW_CMD_IF_GET_COUNTER(cmd, if_id, type) \
++do { \
++ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, if_id);\
++ MC_CMD_OP(cmd, 0, 16, 5, enum dpsw_counter, type);\
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPSW_RSP_IF_GET_COUNTER(cmd, counter) \
++ MC_RSP_OP(cmd, 1, 0, 64, uint64_t, counter)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPSW_CMD_IF_SET_COUNTER(cmd, if_id, type, counter) \
++do { \
++ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, if_id);\
++ MC_CMD_OP(cmd, 0, 16, 5, enum dpsw_counter, type);\
++ MC_CMD_OP(cmd, 1, 0, 64, uint64_t, counter);\
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPSW_CMD_IF_SET_TX_SELECTION(cmd, if_id, cfg) \
++do { \
++ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, if_id);\
++ MC_CMD_OP(cmd, 0, 16, 3, enum dpsw_priority_selector, \
++ cfg->priority_selector);\
++ MC_CMD_OP(cmd, 1, 0, 8, uint8_t, cfg->tc_id[0]);\
++ MC_CMD_OP(cmd, 1, 8, 8, uint8_t, cfg->tc_id[1]);\
++ MC_CMD_OP(cmd, 1, 16, 8, uint8_t, cfg->tc_id[2]);\
++ MC_CMD_OP(cmd, 1, 24, 8, uint8_t, cfg->tc_id[3]);\
++ MC_CMD_OP(cmd, 1, 32, 8, uint8_t, cfg->tc_id[4]);\
++ MC_CMD_OP(cmd, 1, 40, 8, uint8_t, cfg->tc_id[5]);\
++ MC_CMD_OP(cmd, 1, 48, 8, uint8_t, cfg->tc_id[6]);\
++ MC_CMD_OP(cmd, 1, 56, 8, uint8_t, cfg->tc_id[7]);\
++ MC_CMD_OP(cmd, 2, 0, 16, uint16_t, cfg->tc_sched[0].delta_bandwidth);\
++ MC_CMD_OP(cmd, 2, 16, 4, enum dpsw_schedule_mode, \
++ cfg->tc_sched[0].mode);\
++ MC_CMD_OP(cmd, 2, 32, 16, uint16_t, cfg->tc_sched[1].delta_bandwidth);\
++ MC_CMD_OP(cmd, 2, 48, 4, enum dpsw_schedule_mode, \
++ cfg->tc_sched[1].mode);\
++ MC_CMD_OP(cmd, 3, 0, 16, uint16_t, cfg->tc_sched[2].delta_bandwidth);\
++ MC_CMD_OP(cmd, 3, 16, 4, enum dpsw_schedule_mode, \
++ cfg->tc_sched[2].mode);\
++ MC_CMD_OP(cmd, 3, 32, 16, uint16_t, cfg->tc_sched[3].delta_bandwidth);\
++ MC_CMD_OP(cmd, 3, 48, 4, enum dpsw_schedule_mode, \
++ cfg->tc_sched[3].mode);\
++ MC_CMD_OP(cmd, 4, 0, 16, uint16_t, cfg->tc_sched[4].delta_bandwidth);\
++ MC_CMD_OP(cmd, 4, 16, 4, enum dpsw_schedule_mode, \
++ cfg->tc_sched[4].mode);\
++ MC_CMD_OP(cmd, 4, 32, 16, uint16_t, cfg->tc_sched[5].delta_bandwidth);\
++ MC_CMD_OP(cmd, 4, 48, 4, enum dpsw_schedule_mode, \
++ cfg->tc_sched[5].mode);\
++ MC_CMD_OP(cmd, 5, 0, 16, uint16_t, cfg->tc_sched[6].delta_bandwidth);\
++ MC_CMD_OP(cmd, 5, 16, 4, enum dpsw_schedule_mode, \
++ cfg->tc_sched[6].mode);\
++ MC_CMD_OP(cmd, 5, 32, 16, uint16_t, cfg->tc_sched[7].delta_bandwidth);\
++ MC_CMD_OP(cmd, 5, 48, 4, enum dpsw_schedule_mode, \
++ cfg->tc_sched[7].mode);\
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPSW_CMD_IF_ADD_REFLECTION(cmd, if_id, cfg) \
++do { \
++ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, if_id);\
++ MC_CMD_OP(cmd, 0, 16, 16, uint16_t, cfg->vlan_id);\
++ MC_CMD_OP(cmd, 0, 32, 2, enum dpsw_reflection_filter, cfg->filter);\
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPSW_CMD_IF_REMOVE_REFLECTION(cmd, if_id, cfg) \
++do { \
++ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, if_id);\
++ MC_CMD_OP(cmd, 0, 16, 16, uint16_t, cfg->vlan_id);\
++ MC_CMD_OP(cmd, 0, 32, 2, enum dpsw_reflection_filter, cfg->filter);\
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPSW_CMD_IF_SET_FLOODING_METERING(cmd, if_id, cfg) \
++do { \
++ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, if_id);\
++ MC_CMD_OP(cmd, 0, 24, 4, enum dpsw_metering_mode, cfg->mode);\
++ MC_CMD_OP(cmd, 0, 28, 4, enum dpsw_metering_unit, cfg->units);\
++ MC_CMD_OP(cmd, 0, 32, 32, uint32_t, cfg->cir);\
++ MC_CMD_OP(cmd, 1, 0, 32, uint32_t, cfg->eir);\
++ MC_CMD_OP(cmd, 1, 32, 32, uint32_t, cfg->cbs);\
++ MC_CMD_OP(cmd, 2, 0, 32, uint32_t, cfg->ebs);\
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPSW_CMD_IF_SET_METERING(cmd, if_id, tc_id, cfg) \
++do { \
++ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, if_id);\
++ MC_CMD_OP(cmd, 0, 16, 8, uint8_t, tc_id);\
++ MC_CMD_OP(cmd, 0, 24, 4, enum dpsw_metering_mode, cfg->mode);\
++ MC_CMD_OP(cmd, 0, 28, 4, enum dpsw_metering_unit, cfg->units);\
++ MC_CMD_OP(cmd, 0, 32, 32, uint32_t, cfg->cir);\
++ MC_CMD_OP(cmd, 1, 0, 32, uint32_t, cfg->eir);\
++ MC_CMD_OP(cmd, 1, 32, 32, uint32_t, cfg->cbs);\
++ MC_CMD_OP(cmd, 2, 0, 32, uint32_t, cfg->ebs);\
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPSW_PREP_EARLY_DROP(ext, cfg) \
++do { \
++ MC_PREP_OP(ext, 0, 0, 2, enum dpsw_early_drop_mode, cfg->drop_mode); \
++ MC_PREP_OP(ext, 0, 2, 2, \
++ enum dpsw_early_drop_unit, cfg->units); \
++ MC_PREP_OP(ext, 0, 32, 32, uint32_t, cfg->tail_drop_threshold); \
++ MC_PREP_OP(ext, 1, 0, 8, uint8_t, cfg->green.drop_probability); \
++ MC_PREP_OP(ext, 2, 0, 64, uint64_t, cfg->green.max_threshold); \
++ MC_PREP_OP(ext, 3, 0, 64, uint64_t, cfg->green.min_threshold); \
++ MC_PREP_OP(ext, 5, 0, 8, uint8_t, cfg->yellow.drop_probability);\
++ MC_PREP_OP(ext, 6, 0, 64, uint64_t, cfg->yellow.max_threshold); \
++ MC_PREP_OP(ext, 7, 0, 64, uint64_t, cfg->yellow.min_threshold); \
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPSW_EXT_EARLY_DROP(ext, cfg) \
++do { \
++ MC_EXT_OP(ext, 0, 0, 2, enum dpsw_early_drop_mode, cfg->drop_mode); \
++ MC_EXT_OP(ext, 0, 2, 2, \
++ enum dpsw_early_drop_unit, cfg->units); \
++ MC_EXT_OP(ext, 0, 32, 32, uint32_t, cfg->tail_drop_threshold); \
++ MC_EXT_OP(ext, 1, 0, 8, uint8_t, cfg->green.drop_probability); \
++ MC_EXT_OP(ext, 2, 0, 64, uint64_t, cfg->green.max_threshold); \
++ MC_EXT_OP(ext, 3, 0, 64, uint64_t, cfg->green.min_threshold); \
++ MC_EXT_OP(ext, 5, 0, 8, uint8_t, cfg->yellow.drop_probability);\
++ MC_EXT_OP(ext, 6, 0, 64, uint64_t, cfg->yellow.max_threshold); \
++ MC_EXT_OP(ext, 7, 0, 64, uint64_t, cfg->yellow.min_threshold); \
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPSW_CMD_IF_SET_EARLY_DROP(cmd, if_id, tc_id, early_drop_iova) \
++do { \
++ MC_CMD_OP(cmd, 0, 8, 8, uint8_t, tc_id); \
++ MC_CMD_OP(cmd, 0, 16, 16, uint16_t, if_id); \
++ MC_CMD_OP(cmd, 1, 0, 64, uint64_t, early_drop_iova); \
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPSW_CMD_ADD_CUSTOM_TPID(cmd, cfg) \
++ MC_CMD_OP(cmd, 0, 16, 16, uint16_t, cfg->tpid)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPSW_CMD_REMOVE_CUSTOM_TPID(cmd, cfg) \
++ MC_CMD_OP(cmd, 0, 16, 16, uint16_t, cfg->tpid)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPSW_CMD_IF_ENABLE(cmd, if_id) \
++ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, if_id)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPSW_CMD_IF_DISABLE(cmd, if_id) \
++ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, if_id)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPSW_CMD_IF_GET_ATTR(cmd, if_id) \
++ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, if_id)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPSW_RSP_IF_GET_ATTR(cmd, attr) \
++do { \
++ MC_RSP_OP(cmd, 0, 0, 4, enum dpsw_accepted_frames, \
++ attr->admit_untagged);\
++ MC_RSP_OP(cmd, 0, 5, 1, int, attr->enabled);\
++ MC_RSP_OP(cmd, 0, 6, 1, int, attr->accept_all_vlan);\
++ MC_RSP_OP(cmd, 0, 16, 8, uint8_t, attr->num_tcs);\
++ MC_RSP_OP(cmd, 0, 32, 16, uint16_t, attr->qdid);\
++ MC_RSP_OP(cmd, 1, 0, 32, uint32_t, attr->options);\
++ MC_RSP_OP(cmd, 2, 0, 32, uint32_t, attr->rate);\
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPSW_CMD_IF_SET_MAX_FRAME_LENGTH(cmd, if_id, frame_length) \
++do { \
++ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, if_id);\
++ MC_CMD_OP(cmd, 0, 16, 16, uint16_t, frame_length);\
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPSW_CMD_IF_GET_MAX_FRAME_LENGTH(cmd, if_id) \
++ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, if_id)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPSW_RSP_IF_GET_MAX_FRAME_LENGTH(cmd, frame_length) \
++ MC_RSP_OP(cmd, 0, 16, 16, uint16_t, frame_length)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPSW_CMD_IF_SET_LINK_CFG(cmd, if_id, cfg) \
++do { \
++ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, if_id);\
++ MC_CMD_OP(cmd, 1, 0, 32, uint32_t, cfg->rate);\
++ MC_CMD_OP(cmd, 2, 0, 64, uint64_t, cfg->options);\
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPSW_CMD_IF_GET_LINK_STATE(cmd, if_id) \
++ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, if_id)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPSW_RSP_IF_GET_LINK_STATE(cmd, state) \
++do { \
++ MC_RSP_OP(cmd, 0, 32, 1, int, state->up);\
++ MC_RSP_OP(cmd, 1, 0, 32, uint32_t, state->rate);\
++ MC_RSP_OP(cmd, 2, 0, 64, uint64_t, state->options);\
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPSW_CMD_VLAN_ADD(cmd, vlan_id, cfg) \
++do { \
++ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, cfg->fdb_id);\
++ MC_CMD_OP(cmd, 0, 16, 16, uint16_t, vlan_id);\
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPSW_CMD_VLAN_ADD_IF(cmd, vlan_id) \
++ MC_CMD_OP(cmd, 0, 16, 16, uint16_t, vlan_id)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPSW_CMD_VLAN_ADD_IF_UNTAGGED(cmd, vlan_id) \
++ MC_CMD_OP(cmd, 0, 16, 16, uint16_t, vlan_id)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPSW_CMD_VLAN_ADD_IF_FLOODING(cmd, vlan_id) \
++ MC_CMD_OP(cmd, 0, 16, 16, uint16_t, vlan_id)
++
++#define DPSW_CMD_VLAN_REMOVE_IF(cmd, vlan_id) \
++ MC_CMD_OP(cmd, 0, 16, 16, uint16_t, vlan_id)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPSW_CMD_VLAN_REMOVE_IF_UNTAGGED(cmd, vlan_id) \
++ MC_CMD_OP(cmd, 0, 16, 16, uint16_t, vlan_id)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPSW_CMD_VLAN_REMOVE_IF_FLOODING(cmd, vlan_id) \
++ MC_CMD_OP(cmd, 0, 16, 16, uint16_t, vlan_id)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPSW_CMD_VLAN_REMOVE(cmd, vlan_id) \
++ MC_CMD_OP(cmd, 0, 16, 16, uint16_t, vlan_id)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPSW_CMD_VLAN_GET_ATTR(cmd, vlan_id) \
++ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, vlan_id)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPSW_RSP_VLAN_GET_ATTR(cmd, attr) \
++do { \
++ MC_RSP_OP(cmd, 1, 0, 16, uint16_t, attr->fdb_id); \
++ MC_RSP_OP(cmd, 1, 16, 16, uint16_t, attr->num_ifs); \
++ MC_RSP_OP(cmd, 1, 32, 16, uint16_t, attr->num_untagged_ifs); \
++ MC_RSP_OP(cmd, 1, 48, 16, uint16_t, attr->num_flooding_ifs); \
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPSW_CMD_VLAN_GET_IF(cmd, vlan_id) \
++ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, vlan_id)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPSW_RSP_VLAN_GET_IF(cmd, cfg) \
++ MC_RSP_OP(cmd, 0, 16, 16, uint16_t, cfg->num_ifs)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPSW_CMD_VLAN_GET_IF_FLOODING(cmd, vlan_id) \
++ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, vlan_id)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPSW_RSP_VLAN_GET_IF_FLOODING(cmd, cfg) \
++ MC_RSP_OP(cmd, 0, 16, 16, uint16_t, cfg->num_ifs)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPSW_CMD_VLAN_GET_IF_UNTAGGED(cmd, vlan_id) \
++ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, vlan_id)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPSW_RSP_VLAN_GET_IF_UNTAGGED(cmd, cfg) \
++ MC_RSP_OP(cmd, 0, 16, 16, uint16_t, cfg->num_ifs)
++
++/* param, offset, width, type, arg_name */
++#define DPSW_CMD_FDB_ADD(cmd, cfg) \
++do { \
++ MC_CMD_OP(cmd, 0, 32, 16, uint16_t, cfg->fdb_aging_time);\
++ MC_CMD_OP(cmd, 0, 48, 16, uint16_t, cfg->num_fdb_entries);\
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPSW_RSP_FDB_ADD(cmd, fdb_id) \
++ MC_RSP_OP(cmd, 0, 0, 16, uint16_t, fdb_id)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPSW_CMD_FDB_REMOVE(cmd, fdb_id) \
++ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, fdb_id)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPSW_CMD_FDB_ADD_UNICAST(cmd, fdb_id, cfg) \
++do { \
++ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, fdb_id);\
++ MC_CMD_OP(cmd, 0, 16, 8, uint8_t, cfg->mac_addr[5]);\
++ MC_CMD_OP(cmd, 0, 24, 8, uint8_t, cfg->mac_addr[4]);\
++ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, cfg->mac_addr[3]);\
++ MC_CMD_OP(cmd, 0, 40, 8, uint8_t, cfg->mac_addr[2]);\
++ MC_CMD_OP(cmd, 0, 48, 8, uint8_t, cfg->mac_addr[1]);\
++ MC_CMD_OP(cmd, 0, 56, 8, uint8_t, cfg->mac_addr[0]);\
++ MC_CMD_OP(cmd, 1, 0, 8, uint16_t, cfg->if_egress);\
++ MC_CMD_OP(cmd, 1, 16, 4, enum dpsw_fdb_entry_type, cfg->type);\
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPSW_CMD_FDB_GET_UNICAST(cmd, fdb_id) \
++do { \
++ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, fdb_id);\
++ MC_CMD_OP(cmd, 0, 16, 8, uint8_t, cfg->mac_addr[5]);\
++ MC_CMD_OP(cmd, 0, 24, 8, uint8_t, cfg->mac_addr[4]);\
++ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, cfg->mac_addr[3]);\
++ MC_CMD_OP(cmd, 0, 40, 8, uint8_t, cfg->mac_addr[2]);\
++ MC_CMD_OP(cmd, 0, 48, 8, uint8_t, cfg->mac_addr[1]);\
++ MC_CMD_OP(cmd, 0, 56, 8, uint8_t, cfg->mac_addr[0]);\
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPSW_RSP_FDB_GET_UNICAST(cmd, cfg) \
++do { \
++ MC_RSP_OP(cmd, 1, 0, 16, uint16_t, cfg->if_egress);\
++ MC_RSP_OP(cmd, 1, 16, 4, enum dpsw_fdb_entry_type, cfg->type);\
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPSW_CMD_FDB_REMOVE_UNICAST(cmd, fdb_id, cfg) \
++do { \
++ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, fdb_id);\
++ MC_CMD_OP(cmd, 0, 16, 8, uint8_t, cfg->mac_addr[5]);\
++ MC_CMD_OP(cmd, 0, 24, 8, uint8_t, cfg->mac_addr[4]);\
++ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, cfg->mac_addr[3]);\
++ MC_CMD_OP(cmd, 0, 40, 8, uint8_t, cfg->mac_addr[2]);\
++ MC_CMD_OP(cmd, 0, 48, 8, uint8_t, cfg->mac_addr[1]);\
++ MC_CMD_OP(cmd, 0, 56, 8, uint8_t, cfg->mac_addr[0]);\
++ MC_CMD_OP(cmd, 1, 0, 16, uint16_t, cfg->if_egress);\
++ MC_CMD_OP(cmd, 1, 16, 4, enum dpsw_fdb_entry_type, cfg->type);\
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPSW_CMD_FDB_ADD_MULTICAST(cmd, fdb_id, cfg) \
++do { \
++ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, fdb_id);\
++ MC_CMD_OP(cmd, 0, 16, 16, uint16_t, cfg->num_ifs);\
++ MC_CMD_OP(cmd, 0, 32, 4, enum dpsw_fdb_entry_type, cfg->type);\
++ MC_CMD_OP(cmd, 1, 0, 8, uint8_t, cfg->mac_addr[5]);\
++ MC_CMD_OP(cmd, 1, 8, 8, uint8_t, cfg->mac_addr[4]);\
++ MC_CMD_OP(cmd, 1, 16, 8, uint8_t, cfg->mac_addr[3]);\
++ MC_CMD_OP(cmd, 1, 24, 8, uint8_t, cfg->mac_addr[2]);\
++ MC_CMD_OP(cmd, 1, 32, 8, uint8_t, cfg->mac_addr[1]);\
++ MC_CMD_OP(cmd, 1, 40, 8, uint8_t, cfg->mac_addr[0]);\
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPSW_CMD_FDB_GET_MULTICAST(cmd, fdb_id) \
++do { \
++ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, fdb_id);\
++ MC_CMD_OP(cmd, 0, 16, 8, uint8_t, cfg->mac_addr[5]);\
++ MC_CMD_OP(cmd, 0, 24, 8, uint8_t, cfg->mac_addr[4]);\
++ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, cfg->mac_addr[3]);\
++ MC_CMD_OP(cmd, 0, 40, 8, uint8_t, cfg->mac_addr[2]);\
++ MC_CMD_OP(cmd, 0, 48, 8, uint8_t, cfg->mac_addr[1]);\
++ MC_CMD_OP(cmd, 0, 56, 8, uint8_t, cfg->mac_addr[0]);\
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPSW_RSP_FDB_GET_MULTICAST(cmd, cfg) \
++do { \
++ MC_RSP_OP(cmd, 1, 0, 16, uint16_t, cfg->num_ifs);\
++ MC_RSP_OP(cmd, 1, 16, 4, enum dpsw_fdb_entry_type, cfg->type);\
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPSW_CMD_FDB_REMOVE_MULTICAST(cmd, fdb_id, cfg) \
++do { \
++ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, fdb_id);\
++ MC_CMD_OP(cmd, 0, 16, 16, uint16_t, cfg->num_ifs);\
++ MC_CMD_OP(cmd, 0, 32, 4, enum dpsw_fdb_entry_type, cfg->type);\
++ MC_CMD_OP(cmd, 1, 0, 8, uint8_t, cfg->mac_addr[5]);\
++ MC_CMD_OP(cmd, 1, 8, 8, uint8_t, cfg->mac_addr[4]);\
++ MC_CMD_OP(cmd, 1, 16, 8, uint8_t, cfg->mac_addr[3]);\
++ MC_CMD_OP(cmd, 1, 24, 8, uint8_t, cfg->mac_addr[2]);\
++ MC_CMD_OP(cmd, 1, 32, 8, uint8_t, cfg->mac_addr[1]);\
++ MC_CMD_OP(cmd, 1, 40, 8, uint8_t, cfg->mac_addr[0]);\
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPSW_CMD_FDB_SET_LEARNING_MODE(cmd, fdb_id, mode) \
++do { \
++ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, fdb_id);\
++ MC_CMD_OP(cmd, 0, 16, 4, enum dpsw_fdb_learning_mode, mode);\
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPSW_CMD_FDB_GET_ATTR(cmd, fdb_id) \
++ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, fdb_id)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPSW_RSP_FDB_GET_ATTR(cmd, attr) \
++do { \
++ MC_RSP_OP(cmd, 0, 16, 16, uint16_t, attr->max_fdb_entries);\
++ MC_RSP_OP(cmd, 0, 32, 16, uint16_t, attr->fdb_aging_time);\
++ MC_RSP_OP(cmd, 0, 48, 16, uint16_t, attr->num_fdb_mc_groups);\
++ MC_RSP_OP(cmd, 1, 0, 16, uint16_t, attr->max_fdb_mc_groups);\
++ MC_RSP_OP(cmd, 1, 16, 4, enum dpsw_fdb_learning_mode, \
++ attr->learning_mode);\
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPSW_CMD_ACL_ADD(cmd, cfg) \
++ MC_CMD_OP(cmd, 0, 16, 16, uint16_t, cfg->max_entries)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPSW_RSP_ACL_ADD(cmd, acl_id) \
++ MC_RSP_OP(cmd, 0, 0, 16, uint16_t, acl_id)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPSW_CMD_ACL_REMOVE(cmd, acl_id) \
++ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, acl_id)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPSW_PREP_ACL_ENTRY(ext, key) \
++do { \
++ MC_PREP_OP(ext, 0, 0, 8, uint8_t, key->match.l2_dest_mac[5]);\
++ MC_PREP_OP(ext, 0, 8, 8, uint8_t, key->match.l2_dest_mac[4]);\
++ MC_PREP_OP(ext, 0, 16, 8, uint8_t, key->match.l2_dest_mac[3]);\
++ MC_PREP_OP(ext, 0, 24, 8, uint8_t, key->match.l2_dest_mac[2]);\
++ MC_PREP_OP(ext, 0, 32, 8, uint8_t, key->match.l2_dest_mac[1]);\
++ MC_PREP_OP(ext, 0, 40, 8, uint8_t, key->match.l2_dest_mac[0]);\
++ MC_PREP_OP(ext, 0, 48, 16, uint16_t, key->match.l2_tpid);\
++ MC_PREP_OP(ext, 1, 0, 8, uint8_t, key->match.l2_source_mac[5]);\
++ MC_PREP_OP(ext, 1, 8, 8, uint8_t, key->match.l2_source_mac[4]);\
++ MC_PREP_OP(ext, 1, 16, 8, uint8_t, key->match.l2_source_mac[3]);\
++ MC_PREP_OP(ext, 1, 24, 8, uint8_t, key->match.l2_source_mac[2]);\
++ MC_PREP_OP(ext, 1, 32, 8, uint8_t, key->match.l2_source_mac[1]);\
++ MC_PREP_OP(ext, 1, 40, 8, uint8_t, key->match.l2_source_mac[0]);\
++ MC_PREP_OP(ext, 1, 48, 16, uint16_t, key->match.l2_vlan_id);\
++ MC_PREP_OP(ext, 2, 0, 32, uint32_t, key->match.l3_dest_ip);\
++ MC_PREP_OP(ext, 2, 32, 32, uint32_t, key->match.l3_source_ip);\
++ MC_PREP_OP(ext, 3, 0, 16, uint16_t, key->match.l4_dest_port);\
++ MC_PREP_OP(ext, 3, 16, 16, uint16_t, key->match.l4_source_port);\
++ MC_PREP_OP(ext, 3, 32, 16, uint16_t, key->match.l2_ether_type);\
++ MC_PREP_OP(ext, 3, 48, 8, uint8_t, key->match.l2_pcp_dei);\
++ MC_PREP_OP(ext, 3, 56, 8, uint8_t, key->match.l3_dscp);\
++ MC_PREP_OP(ext, 4, 0, 8, uint8_t, key->mask.l2_dest_mac[5]);\
++ MC_PREP_OP(ext, 4, 8, 8, uint8_t, key->mask.l2_dest_mac[4]);\
++ MC_PREP_OP(ext, 4, 16, 8, uint8_t, key->mask.l2_dest_mac[3]);\
++ MC_PREP_OP(ext, 4, 24, 8, uint8_t, key->mask.l2_dest_mac[2]);\
++ MC_PREP_OP(ext, 4, 32, 8, uint8_t, key->mask.l2_dest_mac[1]);\
++ MC_PREP_OP(ext, 4, 40, 8, uint8_t, key->mask.l2_dest_mac[0]);\
++ MC_PREP_OP(ext, 4, 48, 16, uint16_t, key->mask.l2_tpid);\
++ MC_PREP_OP(ext, 5, 0, 8, uint8_t, key->mask.l2_source_mac[5]);\
++ MC_PREP_OP(ext, 5, 8, 8, uint8_t, key->mask.l2_source_mac[4]);\
++ MC_PREP_OP(ext, 5, 16, 8, uint8_t, key->mask.l2_source_mac[3]);\
++ MC_PREP_OP(ext, 5, 24, 8, uint8_t, key->mask.l2_source_mac[2]);\
++ MC_PREP_OP(ext, 5, 32, 8, uint8_t, key->mask.l2_source_mac[1]);\
++ MC_PREP_OP(ext, 5, 40, 8, uint8_t, key->mask.l2_source_mac[0]);\
++ MC_PREP_OP(ext, 5, 48, 16, uint16_t, key->mask.l2_vlan_id);\
++ MC_PREP_OP(ext, 6, 0, 32, uint32_t, key->mask.l3_dest_ip);\
++ MC_PREP_OP(ext, 6, 32, 32, uint32_t, key->mask.l3_source_ip);\
++ MC_PREP_OP(ext, 7, 0, 16, uint16_t, key->mask.l4_dest_port);\
++ MC_PREP_OP(ext, 7, 16, 16, uint16_t, key->mask.l4_source_port);\
++ MC_PREP_OP(ext, 7, 32, 16, uint16_t, key->mask.l2_ether_type);\
++ MC_PREP_OP(ext, 7, 48, 8, uint8_t, key->mask.l2_pcp_dei);\
++ MC_PREP_OP(ext, 7, 56, 8, uint8_t, key->mask.l3_dscp);\
++ MC_PREP_OP(ext, 8, 0, 8, uint8_t, key->match.l3_protocol);\
++ MC_PREP_OP(ext, 8, 8, 8, uint8_t, key->mask.l3_protocol);\
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPSW_EXT_ACL_ENTRY(ext, key) \
++do { \
++ MC_EXT_OP(ext, 0, 0, 8, uint8_t, key->match.l2_dest_mac[5]);\
++ MC_EXT_OP(ext, 0, 8, 8, uint8_t, key->match.l2_dest_mac[4]);\
++ MC_EXT_OP(ext, 0, 16, 8, uint8_t, key->match.l2_dest_mac[3]);\
++ MC_EXT_OP(ext, 0, 24, 8, uint8_t, key->match.l2_dest_mac[2]);\
++ MC_EXT_OP(ext, 0, 32, 8, uint8_t, key->match.l2_dest_mac[1]);\
++ MC_EXT_OP(ext, 0, 40, 8, uint8_t, key->match.l2_dest_mac[0]);\
++ MC_EXT_OP(ext, 0, 48, 16, uint16_t, key->match.l2_tpid);\
++ MC_EXT_OP(ext, 1, 0, 8, uint8_t, key->match.l2_source_mac[5]);\
++ MC_EXT_OP(ext, 1, 8, 8, uint8_t, key->match.l2_source_mac[4]);\
++ MC_EXT_OP(ext, 1, 16, 8, uint8_t, key->match.l2_source_mac[3]);\
++ MC_EXT_OP(ext, 1, 24, 8, uint8_t, key->match.l2_source_mac[2]);\
++ MC_EXT_OP(ext, 1, 32, 8, uint8_t, key->match.l2_source_mac[1]);\
++ MC_EXT_OP(ext, 1, 40, 8, uint8_t, key->match.l2_source_mac[0]);\
++ MC_EXT_OP(ext, 1, 48, 16, uint16_t, key->match.l2_vlan_id);\
++ MC_EXT_OP(ext, 2, 0, 32, uint32_t, key->match.l3_dest_ip);\
++ MC_EXT_OP(ext, 2, 32, 32, uint32_t, key->match.l3_source_ip);\
++ MC_EXT_OP(ext, 3, 0, 16, uint16_t, key->match.l4_dest_port);\
++ MC_EXT_OP(ext, 3, 16, 16, uint16_t, key->match.l4_source_port);\
++ MC_EXT_OP(ext, 3, 32, 16, uint16_t, key->match.l2_ether_type);\
++ MC_EXT_OP(ext, 3, 48, 8, uint8_t, key->match.l2_pcp_dei);\
++ MC_EXT_OP(ext, 3, 56, 8, uint8_t, key->match.l3_dscp);\
++ MC_EXT_OP(ext, 4, 0, 8, uint8_t, key->mask.l2_dest_mac[5]);\
++ MC_EXT_OP(ext, 4, 8, 8, uint8_t, key->mask.l2_dest_mac[4]);\
++ MC_EXT_OP(ext, 4, 16, 8, uint8_t, key->mask.l2_dest_mac[3]);\
++ MC_EXT_OP(ext, 4, 24, 8, uint8_t, key->mask.l2_dest_mac[2]);\
++ MC_EXT_OP(ext, 4, 32, 8, uint8_t, key->mask.l2_dest_mac[1]);\
++ MC_EXT_OP(ext, 4, 40, 8, uint8_t, key->mask.l2_dest_mac[0]);\
++ MC_EXT_OP(ext, 4, 48, 16, uint16_t, key->mask.l2_tpid);\
++ MC_EXT_OP(ext, 5, 0, 8, uint8_t, key->mask.l2_source_mac[5]);\
++ MC_EXT_OP(ext, 5, 8, 8, uint8_t, key->mask.l2_source_mac[4]);\
++ MC_EXT_OP(ext, 5, 16, 8, uint8_t, key->mask.l2_source_mac[3]);\
++ MC_EXT_OP(ext, 5, 24, 8, uint8_t, key->mask.l2_source_mac[2]);\
++ MC_EXT_OP(ext, 5, 32, 8, uint8_t, key->mask.l2_source_mac[1]);\
++ MC_EXT_OP(ext, 5, 40, 8, uint8_t, key->mask.l2_source_mac[0]);\
++ MC_EXT_OP(ext, 5, 48, 16, uint16_t, key->mask.l2_vlan_id);\
++ MC_EXT_OP(ext, 6, 0, 32, uint32_t, key->mask.l3_dest_ip);\
++ MC_EXT_OP(ext, 6, 32, 32, uint32_t, key->mask.l3_source_ip);\
++ MC_EXT_OP(ext, 7, 0, 16, uint16_t, key->mask.l4_dest_port);\
++ MC_EXT_OP(ext, 7, 16, 16, uint16_t, key->mask.l4_source_port);\
++ MC_EXT_OP(ext, 7, 32, 16, uint16_t, key->mask.l2_ether_type);\
++ MC_EXT_OP(ext, 7, 48, 8, uint8_t, key->mask.l2_pcp_dei);\
++ MC_EXT_OP(ext, 7, 56, 8, uint8_t, key->mask.l3_dscp);\
++ MC_EXT_OP(ext, 8, 0, 8, uint8_t, key->match.l3_protocol);\
++ MC_EXT_OP(ext, 8, 8, 8, uint8_t, key->mask.l3_protocol);\
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPSW_CMD_ACL_ADD_ENTRY(cmd, acl_id, cfg) \
++do { \
++ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, acl_id);\
++ MC_CMD_OP(cmd, 0, 16, 16, uint16_t, cfg->result.if_id);\
++ MC_CMD_OP(cmd, 0, 32, 32, int, cfg->precedence);\
++ MC_CMD_OP(cmd, 1, 0, 4, enum dpsw_acl_action, cfg->result.action);\
++ MC_CMD_OP(cmd, 6, 0, 64, uint64_t, cfg->key_iova); \
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPSW_CMD_ACL_REMOVE_ENTRY(cmd, acl_id, cfg) \
++do { \
++ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, acl_id);\
++ MC_CMD_OP(cmd, 0, 16, 16, uint16_t, cfg->result.if_id);\
++ MC_CMD_OP(cmd, 0, 32, 32, int, cfg->precedence);\
++ MC_CMD_OP(cmd, 1, 0, 4, enum dpsw_acl_action, cfg->result.action);\
++ MC_CMD_OP(cmd, 6, 0, 64, uint64_t, cfg->key_iova); \
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPSW_CMD_ACL_ADD_IF(cmd, acl_id, cfg) \
++do { \
++ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, acl_id);\
++ MC_CMD_OP(cmd, 0, 16, 16, uint16_t, cfg->num_ifs); \
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPSW_CMD_ACL_REMOVE_IF(cmd, acl_id, cfg) \
++do { \
++ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, acl_id);\
++ MC_CMD_OP(cmd, 0, 16, 16, uint16_t, cfg->num_ifs); \
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPSW_CMD_ACL_GET_ATTR(cmd, acl_id) \
++ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, acl_id)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPSW_RSP_ACL_GET_ATTR(cmd, attr) \
++do { \
++ MC_RSP_OP(cmd, 1, 0, 16, uint16_t, attr->max_entries);\
++ MC_RSP_OP(cmd, 1, 16, 16, uint16_t, attr->num_entries);\
++ MC_RSP_OP(cmd, 1, 32, 16, uint16_t, attr->num_ifs);\
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPSW_RSP_CTRL_IF_GET_ATTR(cmd, attr) \
++do { \
++ MC_RSP_OP(cmd, 1, 0, 32, uint32_t, attr->rx_fqid);\
++ MC_RSP_OP(cmd, 1, 32, 32, uint32_t, attr->rx_err_fqid);\
++ MC_RSP_OP(cmd, 2, 0, 32, uint32_t, attr->tx_err_conf_fqid);\
++} while (0)
++
++/* cmd, param, offset, width, type, arg_name */
++#define DPSW_CMD_CTRL_IF_SET_POOLS(cmd, cfg) \
++do { \
++ MC_CMD_OP(cmd, 0, 0, 8, uint8_t, cfg->num_dpbp); \
++ MC_CMD_OP(cmd, 0, 8, 1, int, cfg->pools[0].backup_pool); \
++ MC_CMD_OP(cmd, 0, 9, 1, int, cfg->pools[1].backup_pool); \
++ MC_CMD_OP(cmd, 0, 10, 1, int, cfg->pools[2].backup_pool); \
++ MC_CMD_OP(cmd, 0, 11, 1, int, cfg->pools[3].backup_pool); \
++ MC_CMD_OP(cmd, 0, 12, 1, int, cfg->pools[4].backup_pool); \
++ MC_CMD_OP(cmd, 0, 13, 1, int, cfg->pools[5].backup_pool); \
++ MC_CMD_OP(cmd, 0, 14, 1, int, cfg->pools[6].backup_pool); \
++ MC_CMD_OP(cmd, 0, 15, 1, int, cfg->pools[7].backup_pool); \
++ MC_CMD_OP(cmd, 0, 32, 32, int, cfg->pools[0].dpbp_id); \
++ MC_CMD_OP(cmd, 4, 32, 16, uint16_t, cfg->pools[0].buffer_size);\
++ MC_CMD_OP(cmd, 1, 0, 32, int, cfg->pools[1].dpbp_id); \
++ MC_CMD_OP(cmd, 4, 48, 16, uint16_t, cfg->pools[1].buffer_size);\
++ MC_CMD_OP(cmd, 1, 32, 32, int, cfg->pools[2].dpbp_id); \
++ MC_CMD_OP(cmd, 5, 0, 16, uint16_t, cfg->pools[2].buffer_size);\
++ MC_CMD_OP(cmd, 2, 0, 32, int, cfg->pools[3].dpbp_id); \
++ MC_CMD_OP(cmd, 5, 16, 16, uint16_t, cfg->pools[3].buffer_size);\
++ MC_CMD_OP(cmd, 2, 32, 32, int, cfg->pools[4].dpbp_id); \
++ MC_CMD_OP(cmd, 5, 32, 16, uint16_t, cfg->pools[4].buffer_size);\
++ MC_CMD_OP(cmd, 3, 0, 32, int, cfg->pools[5].dpbp_id); \
++ MC_CMD_OP(cmd, 5, 48, 16, uint16_t, cfg->pools[5].buffer_size);\
++ MC_CMD_OP(cmd, 3, 32, 32, int, cfg->pools[6].dpbp_id); \
++ MC_CMD_OP(cmd, 6, 0, 16, uint16_t, cfg->pools[6].buffer_size);\
++ MC_CMD_OP(cmd, 4, 0, 32, int, cfg->pools[7].dpbp_id); \
++ MC_CMD_OP(cmd, 6, 16, 16, uint16_t, cfg->pools[7].buffer_size);\
++} while (0)
++
++#endif /* __FSL_DPSW_CMD_H */
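
The FDB macros above (DPSW_CMD_FDB_ADD_UNICAST and friends) write the MAC bytes in reverse index order: mac_addr[5] lands at bit offset 16 and mac_addr[0] at bit offset 56 of parameter word 0, with fdb_id in the low 16 bits. A minimal standalone sketch of that packing, with the mc_enc() helper from fsl_mc_cmd.h reimplemented locally so the example compiles outside the patch; the MAC and fdb_id values are arbitrary placeholders:

#include <stdint.h>
#include <stdio.h>

/* Local stand-in for mc_enc() from fsl_mc_cmd.h (illustration only). */
static uint64_t enc(int lsoffset, int width, uint64_t val)
{
	uint64_t mask = (width < 64) ? (((uint64_t)1 << width) - 1) : (uint64_t)-1;
	return (val & mask) << lsoffset;
}

int main(void)
{
	const uint8_t mac[6] = { 0x00, 0x04, 0x9f, 0x01, 0x02, 0x03 }; /* placeholder */
	uint16_t fdb_id = 0x0005;                                      /* placeholder */
	uint64_t param0 = 0;
	int i;

	/* Mirror DPSW_CMD_FDB_ADD_UNICAST: fdb_id at bits 0..15,
	 * mac_addr[5] at bits 16..23, ..., mac_addr[0] at bits 56..63. */
	param0 |= enc(0, 16, fdb_id);
	for (i = 0; i < 6; i++)
		param0 |= enc(16 + 8 * i, 8, mac[5 - i]);

	printf("param[0] = 0x%016llx\n", (unsigned long long)param0);
	return 0;
}
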
+diff --git a/drivers/net/dpaa2/mc/fsl_mc_cmd.h b/drivers/net/dpaa2/mc/fsl_mc_cmd.h
+new file mode 100644
+index 0000000..ac4f2b4
+--- /dev/null
++++ b/drivers/net/dpaa2/mc/fsl_mc_cmd.h
+@@ -0,0 +1,221 @@
++/* Copyright 2013-2015 Freescale Semiconductor Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of the above-listed copyright holders nor the
++ * names of any contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
++ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
++ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
++ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
++ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
++ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
++ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
++ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
++ * POSSIBILITY OF SUCH DAMAGE.
++ */
++#ifndef __FSL_MC_CMD_H
++#define __FSL_MC_CMD_H
++
++#define MC_CMD_NUM_OF_PARAMS 7
++
++#define MAKE_UMASK64(_width) \
++ ((uint64_t)((_width) < 64 ? ((uint64_t)1 << (_width)) - 1 :\
++ (uint64_t)-1))
++static inline uint64_t mc_enc(int lsoffset, int width, uint64_t val)
++{
++ return (uint64_t)(((uint64_t)val & MAKE_UMASK64(width)) << lsoffset);
++}
++
++static inline uint64_t mc_dec(uint64_t val, int lsoffset, int width)
++{
++ return (uint64_t)((val >> lsoffset) & MAKE_UMASK64(width));
++}
++
++struct mc_command {
++ uint64_t header;
++ uint64_t params[MC_CMD_NUM_OF_PARAMS];
++};
++
++/**
++ * enum mc_cmd_status - indicates MC status at command response
++ * @MC_CMD_STATUS_OK: Completed successfully
++ * @MC_CMD_STATUS_READY: Ready to be processed
++ * @MC_CMD_STATUS_AUTH_ERR: Authentication error
++ * @MC_CMD_STATUS_NO_PRIVILEGE: No privilege
++ * @MC_CMD_STATUS_DMA_ERR: DMA or I/O error
++ * @MC_CMD_STATUS_CONFIG_ERR: Configuration error
++ * @MC_CMD_STATUS_TIMEOUT: Operation timed out
++ * @MC_CMD_STATUS_NO_RESOURCE: No resources
++ * @MC_CMD_STATUS_NO_MEMORY: No memory available
++ * @MC_CMD_STATUS_BUSY: Device is busy
++ * @MC_CMD_STATUS_UNSUPPORTED_OP: Unsupported operation
++ * @MC_CMD_STATUS_INVALID_STATE: Invalid state
++ */
++enum mc_cmd_status {
++ MC_CMD_STATUS_OK = 0x0,
++ MC_CMD_STATUS_READY = 0x1,
++ MC_CMD_STATUS_AUTH_ERR = 0x3,
++ MC_CMD_STATUS_NO_PRIVILEGE = 0x4,
++ MC_CMD_STATUS_DMA_ERR = 0x5,
++ MC_CMD_STATUS_CONFIG_ERR = 0x6,
++ MC_CMD_STATUS_TIMEOUT = 0x7,
++ MC_CMD_STATUS_NO_RESOURCE = 0x8,
++ MC_CMD_STATUS_NO_MEMORY = 0x9,
++ MC_CMD_STATUS_BUSY = 0xA,
++ MC_CMD_STATUS_UNSUPPORTED_OP = 0xB,
++ MC_CMD_STATUS_INVALID_STATE = 0xC
++};
++
++/* MC command flags */
++
++/**
++ * High priority flag
++ */
++#define MC_CMD_FLAG_PRI 0x00008000
++/**
++ * Command completion flag
++ */
++#define MC_CMD_FLAG_INTR_DIS 0x01000000
++
++/**
++ * Command ID field offset
++ */
++#define MC_CMD_HDR_CMDID_O 52
++/**
++ * Command ID field size
++ */
++#define MC_CMD_HDR_CMDID_S 12
++/**
++ * Token field offset
++ */
++#define MC_CMD_HDR_TOKEN_O 38
++/**
++ * Token field size
++ */
++#define MC_CMD_HDR_TOKEN_S 10
++/**
++ * Status field offset
++ */
++#define MC_CMD_HDR_STATUS_O 16
++/**
++ * Status field size
++ */
++#define MC_CMD_HDR_STATUS_S 8
++/**
++ * Flags field offset
++ */
++#define MC_CMD_HDR_FLAGS_O 0
++/**
++ * Flags field size
++ */
++#define MC_CMD_HDR_FLAGS_S 32
++/**
++ * Command flags mask
++ */
++#define MC_CMD_HDR_FLAGS_MASK 0xFF00FF00
++
++#define MC_CMD_HDR_READ_STATUS(_hdr) \
++ ((enum mc_cmd_status)mc_dec((_hdr), \
++ MC_CMD_HDR_STATUS_O, MC_CMD_HDR_STATUS_S))
++
++#define MC_CMD_HDR_READ_TOKEN(_hdr) \
++ ((uint16_t)mc_dec((_hdr), MC_CMD_HDR_TOKEN_O, MC_CMD_HDR_TOKEN_S))
++
++#define MC_PREP_OP(_ext, _param, _offset, _width, _type, _arg) \
++ ((_ext)[_param] |= cpu_to_le64(mc_enc((_offset), (_width), _arg)))
++
++#define MC_EXT_OP(_ext, _param, _offset, _width, _type, _arg) \
++ (_arg = (_type)mc_dec(cpu_to_le64(_ext[_param]), (_offset), (_width)))
++
++#define MC_CMD_OP(_cmd, _param, _offset, _width, _type, _arg) \
++ ((_cmd).params[_param] |= mc_enc((_offset), (_width), _arg))
++
++#define MC_RSP_OP(_cmd, _param, _offset, _width, _type, _arg) \
++ (_arg = (_type)mc_dec(_cmd.params[_param], (_offset), (_width)))
++
++static inline uint64_t mc_encode_cmd_header(uint16_t cmd_id,
++ uint32_t cmd_flags,
++ uint16_t token)
++{
++ uint64_t hdr;
++
++ hdr = mc_enc(MC_CMD_HDR_CMDID_O, MC_CMD_HDR_CMDID_S, cmd_id);
++ hdr |= mc_enc(MC_CMD_HDR_FLAGS_O, MC_CMD_HDR_FLAGS_S,
++ (cmd_flags & MC_CMD_HDR_FLAGS_MASK));
++ hdr |= mc_enc(MC_CMD_HDR_TOKEN_O, MC_CMD_HDR_TOKEN_S, token);
++ hdr |= mc_enc(MC_CMD_HDR_STATUS_O, MC_CMD_HDR_STATUS_S,
++ MC_CMD_STATUS_READY);
++
++ return hdr;
++}
++
++/**
++ * mc_write_command - writes a command to a Management Complex (MC) portal
++ *
++ * @portal: pointer to an MC portal
++ * @cmd: pointer to a filled command
++ */
++static inline void mc_write_command(struct mc_command __iomem *portal,
++ struct mc_command *cmd)
++{
++ int i;
++ uint32_t word;
++
++ /* copy command parameters into the portal */
++ for (i = 0; i < MC_CMD_NUM_OF_PARAMS; i++)
++ iowrite64(cmd->params[i], &portal->params[i]);
++
++ /* submit the command by writing the header */
++ word = (uint32_t)mc_dec(cmd->header, 32, 32);
++ iowrite32(word, (((uint32_t *)&portal->header) + 1));
++
++ word = (uint32_t)mc_dec(cmd->header, 0, 32);
++ iowrite32(word, (uint32_t *)&portal->header);
++}
++
++/**
++ * mc_read_response - reads the response for the last MC command from a
++ * Management Complex (MC) portal
++ *
++ * @portal: pointer to an MC portal
++ * @resp: pointer to command response buffer
++ *
++ * Returns MC_CMD_STATUS_OK on success; an error code otherwise.
++ */
++static inline enum mc_cmd_status mc_read_response(
++ struct mc_command __iomem *portal,
++ struct mc_command *resp)
++{
++ int i;
++ enum mc_cmd_status status;
++
++ /* Copy command response header from MC portal: */
++ resp->header = ioread64(&portal->header);
++ status = MC_CMD_HDR_READ_STATUS(resp->header);
++ if (status != MC_CMD_STATUS_OK)
++ return status;
++
++ /* Copy command response data from MC portal: */
++ for (i = 0; i < MC_CMD_NUM_OF_PARAMS; i++)
++ resp->params[i] = ioread64(&portal->params[i]);
++
++ return status;
++}
++
++#endif /* __FSL_MC_CMD_H */
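
fsl_mc_cmd.h models every command as a 64-bit header plus seven 64-bit parameter words, and each field is just an (offset, width) slice encoded with mc_enc() and read back with mc_dec(). A small self-contained sketch of that round trip, using the header layout defined above; the command id 0x901 is a placeholder, not a real DPxx command code:

#include <stdint.h>
#include <stdio.h>

/* Local copies of mc_enc()/mc_dec() so the example is self-contained. */
static uint64_t enc(int lsoffset, int width, uint64_t val)
{
	uint64_t mask = (width < 64) ? (((uint64_t)1 << width) - 1) : (uint64_t)-1;
	return (val & mask) << lsoffset;
}

static uint64_t dec(uint64_t val, int lsoffset, int width)
{
	uint64_t mask = (width < 64) ? (((uint64_t)1 << width) - 1) : (uint64_t)-1;
	return (val >> lsoffset) & mask;
}

int main(void)
{
	/* Field layout from fsl_mc_cmd.h: cmd_id @52/12, token @38/10,
	 * status @16/8, flags @0/32 (masked with MC_CMD_HDR_FLAGS_MASK). */
	uint16_t cmd_id = 0x901;       /* placeholder command id */
	uint16_t token  = 0x2a;        /* placeholder object token */
	uint32_t flags  = 0x00008000;  /* MC_CMD_FLAG_PRI */

	uint64_t hdr = enc(52, 12, cmd_id) |
		       enc(0, 32, flags & 0xFF00FF00) |
		       enc(38, 10, token) |
		       enc(16, 8, 0x1 /* MC_CMD_STATUS_READY */);

	printf("header = 0x%016llx\n", (unsigned long long)hdr);
	printf("token  = 0x%llx\n", (unsigned long long)dec(hdr, 38, 10));
	printf("status = 0x%llx\n", (unsigned long long)dec(hdr, 16, 8));
	return 0;
}
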
+diff --git a/drivers/net/dpaa2/mc/fsl_mc_sys.h b/drivers/net/dpaa2/mc/fsl_mc_sys.h
+new file mode 100644
+index 0000000..769c129
+--- /dev/null
++++ b/drivers/net/dpaa2/mc/fsl_mc_sys.h
+@@ -0,0 +1,95 @@
++/* Copyright 2013-2015 Freescale Semiconductor Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of the above-listed copyright holders nor the
++ * names of any contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
++ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
++ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
++ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
++ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
++ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
++ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
++ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
++ * POSSIBILITY OF SUCH DAMAGE.
++ */
++#ifndef _FSL_MC_SYS_H
++#define _FSL_MC_SYS_H
++
++#ifdef __linux_driver__
++
++#include <linux/errno.h>
++#include <asm/io.h>
++#include <linux/slab.h>
++
++struct fsl_mc_io {
++ void *regs;
++};
++
++#ifndef ENOTSUP
++#define ENOTSUP 95
++#endif
++
++#define ioread64(_p) readq(_p)
++#define iowrite64(_v, _p) writeq(_v, _p)
++
++#else /* __linux_driver__ */
++
++#include <stdio.h>
++#include <libio.h>
++#include <stdint.h>
++#include <errno.h>
++#include <sys/uio.h>
++#include <linux/byteorder/little_endian.h>
++
++#define cpu_to_le64(x) __cpu_to_le64(x)
++#ifndef dmb
++#define dmb() __asm__ __volatile__ ("" : : : "memory")
++#endif
++#define __iormb() dmb()
++#define __iowmb() dmb()
++#define __arch_getq(a) (*(volatile unsigned long *)(a))
++#define __arch_putq(v, a) (*(volatile unsigned long *)(a) = (v))
++#define __arch_putq32(v, a) (*(volatile unsigned int *)(a) = (v))
++#define readq(c) ({ uint64_t __v = __arch_getq(c); __iormb(); __v; })
++#define writeq(v, c) ({ uint64_t __v = v; __iowmb(); __arch_putq(__v, c); __v; })
++#define writeq32(v, c) ({ uint32_t __v = v; __iowmb(); __arch_putq32(__v, c); __v; })
++#define ioread64(_p) readq(_p)
++#define iowrite64(_v, _p) writeq(_v, _p)
++#define iowrite32(_v, _p) writeq32(_v, _p)
++#define __iomem
++
++struct fsl_mc_io {
++ void *regs;
++};
++
++#ifndef ENOTSUP
++#define ENOTSUP 95
++#endif
++
++/* GPP is supposed to use MC commands with low priority */
++#define CMD_PRI_LOW 0 /*!< Low Priority command indication */
++
++struct mc_command;
++
++int mc_send_command(struct fsl_mc_io *mc_io, struct mc_command *cmd);
++
++#endif /* __linux_driver__ */
++
++#endif /* _FSL_MC_SYS_H */
+diff --git a/drivers/net/dpaa2/mc/fsl_net.h b/drivers/net/dpaa2/mc/fsl_net.h
+new file mode 100644
+index 0000000..43825b8
+--- /dev/null
++++ b/drivers/net/dpaa2/mc/fsl_net.h
+@@ -0,0 +1,480 @@
++/* Copyright 2013-2015 Freescale Semiconductor Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of the above-listed copyright holders nor the
++ * names of any contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
++ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
++ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
++ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
++ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
++ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
++ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
++ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
++ * POSSIBILITY OF SUCH DAMAGE.
++ */
++#ifndef __FSL_NET_H
++#define __FSL_NET_H
++
++#define LAST_HDR_INDEX 0xFFFFFFFF
++
++/*****************************************************************************/
++/* Protocol fields */
++/*****************************************************************************/
++
++/************************* Ethernet fields *********************************/
++#define NH_FLD_ETH_DA (1)
++#define NH_FLD_ETH_SA (NH_FLD_ETH_DA << 1)
++#define NH_FLD_ETH_LENGTH (NH_FLD_ETH_DA << 2)
++#define NH_FLD_ETH_TYPE (NH_FLD_ETH_DA << 3)
++#define NH_FLD_ETH_FINAL_CKSUM (NH_FLD_ETH_DA << 4)
++#define NH_FLD_ETH_PADDING (NH_FLD_ETH_DA << 5)
++#define NH_FLD_ETH_ALL_FIELDS ((NH_FLD_ETH_DA << 6) - 1)
++
++#define NH_FLD_ETH_ADDR_SIZE 6
++
++/*************************** VLAN fields ***********************************/
++#define NH_FLD_VLAN_VPRI (1)
++#define NH_FLD_VLAN_CFI (NH_FLD_VLAN_VPRI << 1)
++#define NH_FLD_VLAN_VID (NH_FLD_VLAN_VPRI << 2)
++#define NH_FLD_VLAN_LENGTH (NH_FLD_VLAN_VPRI << 3)
++#define NH_FLD_VLAN_TYPE (NH_FLD_VLAN_VPRI << 4)
++#define NH_FLD_VLAN_ALL_FIELDS ((NH_FLD_VLAN_VPRI << 5) - 1)
++
++#define NH_FLD_VLAN_TCI (NH_FLD_VLAN_VPRI | \
++ NH_FLD_VLAN_CFI | \
++ NH_FLD_VLAN_VID)
++
++/************************ IP (generic) fields ******************************/
++#define NH_FLD_IP_VER (1)
++#define NH_FLD_IP_DSCP (NH_FLD_IP_VER << 2)
++#define NH_FLD_IP_ECN (NH_FLD_IP_VER << 3)
++#define NH_FLD_IP_PROTO (NH_FLD_IP_VER << 4)
++#define NH_FLD_IP_SRC (NH_FLD_IP_VER << 5)
++#define NH_FLD_IP_DST (NH_FLD_IP_VER << 6)
++#define NH_FLD_IP_TOS_TC (NH_FLD_IP_VER << 7)
++#define NH_FLD_IP_ID (NH_FLD_IP_VER << 8)
++#define NH_FLD_IP_ALL_FIELDS ((NH_FLD_IP_VER << 9) - 1)
++
++#define NH_FLD_IP_PROTO_SIZE 1
++
++/***************************** IPV4 fields *********************************/
++#define NH_FLD_IPV4_VER (1)
++#define NH_FLD_IPV4_HDR_LEN (NH_FLD_IPV4_VER << 1)
++#define NH_FLD_IPV4_TOS (NH_FLD_IPV4_VER << 2)
++#define NH_FLD_IPV4_TOTAL_LEN (NH_FLD_IPV4_VER << 3)
++#define NH_FLD_IPV4_ID (NH_FLD_IPV4_VER << 4)
++#define NH_FLD_IPV4_FLAG_D (NH_FLD_IPV4_VER << 5)
++#define NH_FLD_IPV4_FLAG_M (NH_FLD_IPV4_VER << 6)
++#define NH_FLD_IPV4_OFFSET (NH_FLD_IPV4_VER << 7)
++#define NH_FLD_IPV4_TTL (NH_FLD_IPV4_VER << 8)
++#define NH_FLD_IPV4_PROTO (NH_FLD_IPV4_VER << 9)
++#define NH_FLD_IPV4_CKSUM (NH_FLD_IPV4_VER << 10)
++#define NH_FLD_IPV4_SRC_IP (NH_FLD_IPV4_VER << 11)
++#define NH_FLD_IPV4_DST_IP (NH_FLD_IPV4_VER << 12)
++#define NH_FLD_IPV4_OPTS (NH_FLD_IPV4_VER << 13)
++#define NH_FLD_IPV4_OPTS_COUNT (NH_FLD_IPV4_VER << 14)
++#define NH_FLD_IPV4_ALL_FIELDS ((NH_FLD_IPV4_VER << 15) - 1)
++
++#define NH_FLD_IPV4_ADDR_SIZE 4
++#define NH_FLD_IPV4_PROTO_SIZE 1
++
++/***************************** IPV6 fields *********************************/
++#define NH_FLD_IPV6_VER (1)
++#define NH_FLD_IPV6_TC (NH_FLD_IPV6_VER << 1)
++#define NH_FLD_IPV6_SRC_IP (NH_FLD_IPV6_VER << 2)
++#define NH_FLD_IPV6_DST_IP (NH_FLD_IPV6_VER << 3)
++#define NH_FLD_IPV6_NEXT_HDR (NH_FLD_IPV6_VER << 4)
++#define NH_FLD_IPV6_FL (NH_FLD_IPV6_VER << 5)
++#define NH_FLD_IPV6_HOP_LIMIT (NH_FLD_IPV6_VER << 6)
++#define NH_FLD_IPV6_ID (NH_FLD_IPV6_VER << 7)
++#define NH_FLD_IPV6_ALL_FIELDS ((NH_FLD_IPV6_VER << 8) - 1)
++
++#define NH_FLD_IPV6_ADDR_SIZE 16
++#define NH_FLD_IPV6_NEXT_HDR_SIZE 1
++
++/***************************** ICMP fields *********************************/
++#define NH_FLD_ICMP_TYPE (1)
++#define NH_FLD_ICMP_CODE (NH_FLD_ICMP_TYPE << 1)
++#define NH_FLD_ICMP_CKSUM (NH_FLD_ICMP_TYPE << 2)
++#define NH_FLD_ICMP_ID (NH_FLD_ICMP_TYPE << 3)
++#define NH_FLD_ICMP_SQ_NUM (NH_FLD_ICMP_TYPE << 4)
++#define NH_FLD_ICMP_ALL_FIELDS ((NH_FLD_ICMP_TYPE << 5) - 1)
++
++#define NH_FLD_ICMP_CODE_SIZE 1
++#define NH_FLD_ICMP_TYPE_SIZE 1
++
++/***************************** IGMP fields *********************************/
++#define NH_FLD_IGMP_VERSION (1)
++#define NH_FLD_IGMP_TYPE (NH_FLD_IGMP_VERSION << 1)
++#define NH_FLD_IGMP_CKSUM (NH_FLD_IGMP_VERSION << 2)
++#define NH_FLD_IGMP_DATA (NH_FLD_IGMP_VERSION << 3)
++#define NH_FLD_IGMP_ALL_FIELDS ((NH_FLD_IGMP_VERSION << 4) - 1)
++
++/***************************** TCP fields **********************************/
++#define NH_FLD_TCP_PORT_SRC (1)
++#define NH_FLD_TCP_PORT_DST (NH_FLD_TCP_PORT_SRC << 1)
++#define NH_FLD_TCP_SEQ (NH_FLD_TCP_PORT_SRC << 2)
++#define NH_FLD_TCP_ACK (NH_FLD_TCP_PORT_SRC << 3)
++#define NH_FLD_TCP_OFFSET (NH_FLD_TCP_PORT_SRC << 4)
++#define NH_FLD_TCP_FLAGS (NH_FLD_TCP_PORT_SRC << 5)
++#define NH_FLD_TCP_WINDOW (NH_FLD_TCP_PORT_SRC << 6)
++#define NH_FLD_TCP_CKSUM (NH_FLD_TCP_PORT_SRC << 7)
++#define NH_FLD_TCP_URGPTR (NH_FLD_TCP_PORT_SRC << 8)
++#define NH_FLD_TCP_OPTS (NH_FLD_TCP_PORT_SRC << 9)
++#define NH_FLD_TCP_OPTS_COUNT (NH_FLD_TCP_PORT_SRC << 10)
++#define NH_FLD_TCP_ALL_FIELDS ((NH_FLD_TCP_PORT_SRC << 11) - 1)
++
++#define NH_FLD_TCP_PORT_SIZE 2
++
++/***************************** UDP fields **********************************/
++#define NH_FLD_UDP_PORT_SRC (1)
++#define NH_FLD_UDP_PORT_DST (NH_FLD_UDP_PORT_SRC << 1)
++#define NH_FLD_UDP_LEN (NH_FLD_UDP_PORT_SRC << 2)
++#define NH_FLD_UDP_CKSUM (NH_FLD_UDP_PORT_SRC << 3)
++#define NH_FLD_UDP_ALL_FIELDS ((NH_FLD_UDP_PORT_SRC << 4) - 1)
++
++#define NH_FLD_UDP_PORT_SIZE 2
++
++/*************************** UDP-lite fields *******************************/
++#define NH_FLD_UDP_LITE_PORT_SRC (1)
++#define NH_FLD_UDP_LITE_PORT_DST (NH_FLD_UDP_LITE_PORT_SRC << 1)
++#define NH_FLD_UDP_LITE_ALL_FIELDS \
++ ((NH_FLD_UDP_LITE_PORT_SRC << 2) - 1)
++
++#define NH_FLD_UDP_LITE_PORT_SIZE 2
++
++/*************************** UDP-encap-ESP fields **************************/
++#define NH_FLD_UDP_ENC_ESP_PORT_SRC (1)
++#define NH_FLD_UDP_ENC_ESP_PORT_DST (NH_FLD_UDP_ENC_ESP_PORT_SRC << 1)
++#define NH_FLD_UDP_ENC_ESP_LEN (NH_FLD_UDP_ENC_ESP_PORT_SRC << 2)
++#define NH_FLD_UDP_ENC_ESP_CKSUM (NH_FLD_UDP_ENC_ESP_PORT_SRC << 3)
++#define NH_FLD_UDP_ENC_ESP_SPI (NH_FLD_UDP_ENC_ESP_PORT_SRC << 4)
++#define NH_FLD_UDP_ENC_ESP_SEQUENCE_NUM (NH_FLD_UDP_ENC_ESP_PORT_SRC << 5)
++#define NH_FLD_UDP_ENC_ESP_ALL_FIELDS \
++ ((NH_FLD_UDP_ENC_ESP_PORT_SRC << 6) - 1)
++
++#define NH_FLD_UDP_ENC_ESP_PORT_SIZE 2
++#define NH_FLD_UDP_ENC_ESP_SPI_SIZE 4
++
++/***************************** SCTP fields *********************************/
++#define NH_FLD_SCTP_PORT_SRC (1)
++#define NH_FLD_SCTP_PORT_DST (NH_FLD_SCTP_PORT_SRC << 1)
++#define NH_FLD_SCTP_VER_TAG (NH_FLD_SCTP_PORT_SRC << 2)
++#define NH_FLD_SCTP_CKSUM (NH_FLD_SCTP_PORT_SRC << 3)
++#define NH_FLD_SCTP_ALL_FIELDS ((NH_FLD_SCTP_PORT_SRC << 4) - 1)
++
++#define NH_FLD_SCTP_PORT_SIZE 2
++
++/***************************** DCCP fields *********************************/
++#define NH_FLD_DCCP_PORT_SRC (1)
++#define NH_FLD_DCCP_PORT_DST (NH_FLD_DCCP_PORT_SRC << 1)
++#define NH_FLD_DCCP_ALL_FIELDS ((NH_FLD_DCCP_PORT_SRC << 2) - 1)
++
++#define NH_FLD_DCCP_PORT_SIZE 2
++
++/***************************** IPHC fields *********************************/
++#define NH_FLD_IPHC_CID (1)
++#define NH_FLD_IPHC_CID_TYPE (NH_FLD_IPHC_CID << 1)
++#define NH_FLD_IPHC_HCINDEX (NH_FLD_IPHC_CID << 2)
++#define NH_FLD_IPHC_GEN (NH_FLD_IPHC_CID << 3)
++#define NH_FLD_IPHC_D_BIT (NH_FLD_IPHC_CID << 4)
++#define NH_FLD_IPHC_ALL_FIELDS ((NH_FLD_IPHC_CID << 5) - 1)
++
++/***************************** SCTP fields *********************************/
++#define NH_FLD_SCTP_CHUNK_DATA_TYPE (1)
++#define NH_FLD_SCTP_CHUNK_DATA_FLAGS (NH_FLD_SCTP_CHUNK_DATA_TYPE << 1)
++#define NH_FLD_SCTP_CHUNK_DATA_LENGTH (NH_FLD_SCTP_CHUNK_DATA_TYPE << 2)
++#define NH_FLD_SCTP_CHUNK_DATA_TSN (NH_FLD_SCTP_CHUNK_DATA_TYPE << 3)
++#define NH_FLD_SCTP_CHUNK_DATA_STREAM_ID (NH_FLD_SCTP_CHUNK_DATA_TYPE << 4)
++#define NH_FLD_SCTP_CHUNK_DATA_STREAM_SQN (NH_FLD_SCTP_CHUNK_DATA_TYPE << 5)
++#define NH_FLD_SCTP_CHUNK_DATA_PAYLOAD_PID (NH_FLD_SCTP_CHUNK_DATA_TYPE << 6)
++#define NH_FLD_SCTP_CHUNK_DATA_UNORDERED (NH_FLD_SCTP_CHUNK_DATA_TYPE << 7)
++#define NH_FLD_SCTP_CHUNK_DATA_BEGGINING (NH_FLD_SCTP_CHUNK_DATA_TYPE << 8)
++#define NH_FLD_SCTP_CHUNK_DATA_END (NH_FLD_SCTP_CHUNK_DATA_TYPE << 9)
++#define NH_FLD_SCTP_CHUNK_DATA_ALL_FIELDS \
++ ((NH_FLD_SCTP_CHUNK_DATA_TYPE << 10) - 1)
++
++/*************************** L2TPV2 fields *********************************/
++#define NH_FLD_L2TPV2_TYPE_BIT (1)
++#define NH_FLD_L2TPV2_LENGTH_BIT (NH_FLD_L2TPV2_TYPE_BIT << 1)
++#define NH_FLD_L2TPV2_SEQUENCE_BIT (NH_FLD_L2TPV2_TYPE_BIT << 2)
++#define NH_FLD_L2TPV2_OFFSET_BIT (NH_FLD_L2TPV2_TYPE_BIT << 3)
++#define NH_FLD_L2TPV2_PRIORITY_BIT (NH_FLD_L2TPV2_TYPE_BIT << 4)
++#define NH_FLD_L2TPV2_VERSION (NH_FLD_L2TPV2_TYPE_BIT << 5)
++#define NH_FLD_L2TPV2_LEN (NH_FLD_L2TPV2_TYPE_BIT << 6)
++#define NH_FLD_L2TPV2_TUNNEL_ID (NH_FLD_L2TPV2_TYPE_BIT << 7)
++#define NH_FLD_L2TPV2_SESSION_ID (NH_FLD_L2TPV2_TYPE_BIT << 8)
++#define NH_FLD_L2TPV2_NS (NH_FLD_L2TPV2_TYPE_BIT << 9)
++#define NH_FLD_L2TPV2_NR (NH_FLD_L2TPV2_TYPE_BIT << 10)
++#define NH_FLD_L2TPV2_OFFSET_SIZE (NH_FLD_L2TPV2_TYPE_BIT << 11)
++#define NH_FLD_L2TPV2_FIRST_BYTE (NH_FLD_L2TPV2_TYPE_BIT << 12)
++#define NH_FLD_L2TPV2_ALL_FIELDS \
++ ((NH_FLD_L2TPV2_TYPE_BIT << 13) - 1)
++
++/*************************** L2TPV3 fields *********************************/
++#define NH_FLD_L2TPV3_CTRL_TYPE_BIT (1)
++#define NH_FLD_L2TPV3_CTRL_LENGTH_BIT (NH_FLD_L2TPV3_CTRL_TYPE_BIT << 1)
++#define NH_FLD_L2TPV3_CTRL_SEQUENCE_BIT (NH_FLD_L2TPV3_CTRL_TYPE_BIT << 2)
++#define NH_FLD_L2TPV3_CTRL_VERSION (NH_FLD_L2TPV3_CTRL_TYPE_BIT << 3)
++#define NH_FLD_L2TPV3_CTRL_LENGTH (NH_FLD_L2TPV3_CTRL_TYPE_BIT << 4)
++#define NH_FLD_L2TPV3_CTRL_CONTROL (NH_FLD_L2TPV3_CTRL_TYPE_BIT << 5)
++#define NH_FLD_L2TPV3_CTRL_SENT (NH_FLD_L2TPV3_CTRL_TYPE_BIT << 6)
++#define NH_FLD_L2TPV3_CTRL_RECV (NH_FLD_L2TPV3_CTRL_TYPE_BIT << 7)
++#define NH_FLD_L2TPV3_CTRL_FIRST_BYTE (NH_FLD_L2TPV3_CTRL_TYPE_BIT << 8)
++#define NH_FLD_L2TPV3_CTRL_ALL_FIELDS \
++ ((NH_FLD_L2TPV3_CTRL_TYPE_BIT << 9) - 1)
++
++#define NH_FLD_L2TPV3_SESS_TYPE_BIT (1)
++#define NH_FLD_L2TPV3_SESS_VERSION (NH_FLD_L2TPV3_SESS_TYPE_BIT << 1)
++#define NH_FLD_L2TPV3_SESS_ID (NH_FLD_L2TPV3_SESS_TYPE_BIT << 2)
++#define NH_FLD_L2TPV3_SESS_COOKIE (NH_FLD_L2TPV3_SESS_TYPE_BIT << 3)
++#define NH_FLD_L2TPV3_SESS_ALL_FIELDS \
++ ((NH_FLD_L2TPV3_SESS_TYPE_BIT << 4) - 1)
++
++/**************************** PPP fields ***********************************/
++#define NH_FLD_PPP_PID (1)
++#define NH_FLD_PPP_COMPRESSED (NH_FLD_PPP_PID << 1)
++#define NH_FLD_PPP_ALL_FIELDS ((NH_FLD_PPP_PID << 2) - 1)
++
++/************************** PPPoE fields ***********************************/
++#define NH_FLD_PPPOE_VER (1)
++#define NH_FLD_PPPOE_TYPE (NH_FLD_PPPOE_VER << 1)
++#define NH_FLD_PPPOE_CODE (NH_FLD_PPPOE_VER << 2)
++#define NH_FLD_PPPOE_SID (NH_FLD_PPPOE_VER << 3)
++#define NH_FLD_PPPOE_LEN (NH_FLD_PPPOE_VER << 4)
++#define NH_FLD_PPPOE_SESSION (NH_FLD_PPPOE_VER << 5)
++#define NH_FLD_PPPOE_PID (NH_FLD_PPPOE_VER << 6)
++#define NH_FLD_PPPOE_ALL_FIELDS ((NH_FLD_PPPOE_VER << 7) - 1)
++
++/************************* PPP-Mux fields **********************************/
++#define NH_FLD_PPPMUX_PID (1)
++#define NH_FLD_PPPMUX_CKSUM (NH_FLD_PPPMUX_PID << 1)
++#define NH_FLD_PPPMUX_COMPRESSED (NH_FLD_PPPMUX_PID << 2)
++#define NH_FLD_PPPMUX_ALL_FIELDS ((NH_FLD_PPPMUX_PID << 3) - 1)
++
++/*********************** PPP-Mux sub-frame fields **************************/
++#define NH_FLD_PPPMUX_SUBFRM_PFF (1)
++#define NH_FLD_PPPMUX_SUBFRM_LXT (NH_FLD_PPPMUX_SUBFRM_PFF << 1)
++#define NH_FLD_PPPMUX_SUBFRM_LEN (NH_FLD_PPPMUX_SUBFRM_PFF << 2)
++#define NH_FLD_PPPMUX_SUBFRM_PID (NH_FLD_PPPMUX_SUBFRM_PFF << 3)
++#define NH_FLD_PPPMUX_SUBFRM_USE_PID (NH_FLD_PPPMUX_SUBFRM_PFF << 4)
++#define NH_FLD_PPPMUX_SUBFRM_ALL_FIELDS \
++ ((NH_FLD_PPPMUX_SUBFRM_PFF << 5) - 1)
++
++/*************************** LLC fields ************************************/
++#define NH_FLD_LLC_DSAP (1)
++#define NH_FLD_LLC_SSAP (NH_FLD_LLC_DSAP << 1)
++#define NH_FLD_LLC_CTRL (NH_FLD_LLC_DSAP << 2)
++#define NH_FLD_LLC_ALL_FIELDS ((NH_FLD_LLC_DSAP << 3) - 1)
++
++/*************************** NLPID fields **********************************/
++#define NH_FLD_NLPID_NLPID (1)
++#define NH_FLD_NLPID_ALL_FIELDS ((NH_FLD_NLPID_NLPID << 1) - 1)
++
++/*************************** SNAP fields ***********************************/
++#define NH_FLD_SNAP_OUI (1)
++#define NH_FLD_SNAP_PID (NH_FLD_SNAP_OUI << 1)
++#define NH_FLD_SNAP_ALL_FIELDS ((NH_FLD_SNAP_OUI << 2) - 1)
++
++/*************************** LLC SNAP fields *******************************/
++#define NH_FLD_LLC_SNAP_TYPE (1)
++#define NH_FLD_LLC_SNAP_ALL_FIELDS ((NH_FLD_LLC_SNAP_TYPE << 1) - 1)
++
++#define NH_FLD_ARP_HTYPE (1)
++#define NH_FLD_ARP_PTYPE (NH_FLD_ARP_HTYPE << 1)
++#define NH_FLD_ARP_HLEN (NH_FLD_ARP_HTYPE << 2)
++#define NH_FLD_ARP_PLEN (NH_FLD_ARP_HTYPE << 3)
++#define NH_FLD_ARP_OPER (NH_FLD_ARP_HTYPE << 4)
++#define NH_FLD_ARP_SHA (NH_FLD_ARP_HTYPE << 5)
++#define NH_FLD_ARP_SPA (NH_FLD_ARP_HTYPE << 6)
++#define NH_FLD_ARP_THA (NH_FLD_ARP_HTYPE << 7)
++#define NH_FLD_ARP_TPA (NH_FLD_ARP_HTYPE << 8)
++#define NH_FLD_ARP_ALL_FIELDS ((NH_FLD_ARP_HTYPE << 9) - 1)
++
++/*************************** RFC2684 fields ********************************/
++#define NH_FLD_RFC2684_LLC (1)
++#define NH_FLD_RFC2684_NLPID (NH_FLD_RFC2684_LLC << 1)
++#define NH_FLD_RFC2684_OUI (NH_FLD_RFC2684_LLC << 2)
++#define NH_FLD_RFC2684_PID (NH_FLD_RFC2684_LLC << 3)
++#define NH_FLD_RFC2684_VPN_OUI (NH_FLD_RFC2684_LLC << 4)
++#define NH_FLD_RFC2684_VPN_IDX (NH_FLD_RFC2684_LLC << 5)
++#define NH_FLD_RFC2684_ALL_FIELDS ((NH_FLD_RFC2684_LLC << 6) - 1)
++
++/*************************** User defined fields ***************************/
++#define NH_FLD_USER_DEFINED_SRCPORT (1)
++#define NH_FLD_USER_DEFINED_PCDID (NH_FLD_USER_DEFINED_SRCPORT << 1)
++#define NH_FLD_USER_DEFINED_ALL_FIELDS \
++ ((NH_FLD_USER_DEFINED_SRCPORT << 2) - 1)
++
++/*************************** Payload fields ********************************/
++#define NH_FLD_PAYLOAD_BUFFER (1)
++#define NH_FLD_PAYLOAD_SIZE (NH_FLD_PAYLOAD_BUFFER << 1)
++#define NH_FLD_MAX_FRM_SIZE (NH_FLD_PAYLOAD_BUFFER << 2)
++#define NH_FLD_MIN_FRM_SIZE (NH_FLD_PAYLOAD_BUFFER << 3)
++#define NH_FLD_PAYLOAD_TYPE (NH_FLD_PAYLOAD_BUFFER << 4)
++#define NH_FLD_FRAME_SIZE (NH_FLD_PAYLOAD_BUFFER << 5)
++#define NH_FLD_PAYLOAD_ALL_FIELDS ((NH_FLD_PAYLOAD_BUFFER << 6) - 1)
++
++/*************************** GRE fields ************************************/
++#define NH_FLD_GRE_TYPE (1)
++#define NH_FLD_GRE_ALL_FIELDS ((NH_FLD_GRE_TYPE << 1) - 1)
++
++/*************************** MINENCAP fields *******************************/
++#define NH_FLD_MINENCAP_SRC_IP (1)
++#define NH_FLD_MINENCAP_DST_IP (NH_FLD_MINENCAP_SRC_IP << 1)
++#define NH_FLD_MINENCAP_TYPE (NH_FLD_MINENCAP_SRC_IP << 2)
++#define NH_FLD_MINENCAP_ALL_FIELDS \
++ ((NH_FLD_MINENCAP_SRC_IP << 3) - 1)
++
++/*************************** IPSEC AH fields *******************************/
++#define NH_FLD_IPSEC_AH_SPI (1)
++#define NH_FLD_IPSEC_AH_NH (NH_FLD_IPSEC_AH_SPI << 1)
++#define NH_FLD_IPSEC_AH_ALL_FIELDS ((NH_FLD_IPSEC_AH_SPI << 2) - 1)
++
++/*************************** IPSEC ESP fields ******************************/
++#define NH_FLD_IPSEC_ESP_SPI (1)
++#define NH_FLD_IPSEC_ESP_SEQUENCE_NUM (NH_FLD_IPSEC_ESP_SPI << 1)
++#define NH_FLD_IPSEC_ESP_ALL_FIELDS ((NH_FLD_IPSEC_ESP_SPI << 2) - 1)
++
++#define NH_FLD_IPSEC_ESP_SPI_SIZE 4
++
++/*************************** MPLS fields ***********************************/
++#define NH_FLD_MPLS_LABEL_STACK (1)
++#define NH_FLD_MPLS_LABEL_STACK_ALL_FIELDS \
++ ((NH_FLD_MPLS_LABEL_STACK << 1) - 1)
++
++/*************************** MACSEC fields *********************************/
++#define NH_FLD_MACSEC_SECTAG (1)
++#define NH_FLD_MACSEC_ALL_FIELDS ((NH_FLD_MACSEC_SECTAG << 1) - 1)
++
++/*************************** GTP fields ************************************/
++#define NH_FLD_GTP_TEID (1)
++
++/* Protocol options */
++
++/* Ethernet options */
++#define NH_OPT_ETH_BROADCAST 1
++#define NH_OPT_ETH_MULTICAST 2
++#define NH_OPT_ETH_UNICAST 3
++#define NH_OPT_ETH_BPDU 4
++
++#define NH_ETH_IS_MULTICAST_ADDR(addr) (addr[0] & 0x01)
++/* also applicable for broadcast */
++
++/* VLAN options */
++#define NH_OPT_VLAN_CFI 1
++
++/* IPV4 options */
++#define NH_OPT_IPV4_UNICAST 1
++#define NH_OPT_IPV4_MULTICAST 2
++#define NH_OPT_IPV4_BROADCAST 3
++#define NH_OPT_IPV4_OPTION 4
++#define NH_OPT_IPV4_FRAG 5
++#define NH_OPT_IPV4_INITIAL_FRAG 6
++
++/* IPV6 options */
++#define NH_OPT_IPV6_UNICAST 1
++#define NH_OPT_IPV6_MULTICAST 2
++#define NH_OPT_IPV6_OPTION 3
++#define NH_OPT_IPV6_FRAG 4
++#define NH_OPT_IPV6_INITIAL_FRAG 5
++
++/* General IP options (may be used for any version) */
++#define NH_OPT_IP_FRAG 1
++#define NH_OPT_IP_INITIAL_FRAG 2
++#define NH_OPT_IP_OPTION 3
++
++/* Minenc. options */
++#define NH_OPT_MINENCAP_SRC_ADDR_PRESENT 1
++
++/* GRE. options */
++#define NH_OPT_GRE_ROUTING_PRESENT 1
++
++/* TCP options */
++#define NH_OPT_TCP_OPTIONS 1
++#define NH_OPT_TCP_CONTROL_HIGH_BITS 2
++#define NH_OPT_TCP_CONTROL_LOW_BITS 3
++
++/* CAPWAP options */
++#define NH_OPT_CAPWAP_DTLS 1
++
++enum net_prot {
++ NET_PROT_NONE = 0,
++ NET_PROT_PAYLOAD,
++ NET_PROT_ETH,
++ NET_PROT_VLAN,
++ NET_PROT_IPV4,
++ NET_PROT_IPV6,
++ NET_PROT_IP,
++ NET_PROT_TCP,
++ NET_PROT_UDP,
++ NET_PROT_UDP_LITE,
++ NET_PROT_IPHC,
++ NET_PROT_SCTP,
++ NET_PROT_SCTP_CHUNK_DATA,
++ NET_PROT_PPPOE,
++ NET_PROT_PPP,
++ NET_PROT_PPPMUX,
++ NET_PROT_PPPMUX_SUBFRM,
++ NET_PROT_L2TPV2,
++ NET_PROT_L2TPV3_CTRL,
++ NET_PROT_L2TPV3_SESS,
++ NET_PROT_LLC,
++ NET_PROT_LLC_SNAP,
++ NET_PROT_NLPID,
++ NET_PROT_SNAP,
++ NET_PROT_MPLS,
++ NET_PROT_IPSEC_AH,
++ NET_PROT_IPSEC_ESP,
++ NET_PROT_UDP_ENC_ESP, /* RFC 3948 */
++ NET_PROT_MACSEC,
++ NET_PROT_GRE,
++ NET_PROT_MINENCAP,
++ NET_PROT_DCCP,
++ NET_PROT_ICMP,
++ NET_PROT_IGMP,
++ NET_PROT_ARP,
++ NET_PROT_CAPWAP_DATA,
++ NET_PROT_CAPWAP_CTRL,
++ NET_PROT_RFC2684,
++ NET_PROT_ICMPV6,
++ NET_PROT_FCOE,
++ NET_PROT_FIP,
++ NET_PROT_ISCSI,
++ NET_PROT_GTP,
++ NET_PROT_USER_DEFINED_L2,
++ NET_PROT_USER_DEFINED_L3,
++ NET_PROT_USER_DEFINED_L4,
++ NET_PROT_USER_DEFINED_L5,
++ NET_PROT_USER_DEFINED_SHIM1,
++ NET_PROT_USER_DEFINED_SHIM2,
++
++ NET_PROT_DUMMY_LAST
++};
++
++/*! IEEE8021.Q */
++#define NH_IEEE8021Q_ETYPE 0x8100
++#define NH_IEEE8021Q_HDR(etype, pcp, dei, vlan_id) \
++ ((((uint32_t)(etype & 0xFFFF)) << 16) | \
++ (((uint32_t)(pcp & 0x07)) << 13) | \
++ (((uint32_t)(dei & 0x01)) << 12) | \
++ (((uint32_t)(vlan_id & 0xFFF))))
++
++#endif /* __FSL_NET_H */
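
The NH_FLD_* selectors above are one-hot bits derived from a base flag by shifting, so a set of fields is built by ORing selectors and each *_ALL_FIELDS mask is simply (base << n) - 1. A short standalone sketch, replicating a few of the VLAN definitions from fsl_net.h, that also shows the NH_IEEE8021Q_HDR() helper producing a tag word (the PCP/VLAN values are arbitrary):

#include <stdint.h>
#include <stdio.h>

/* Replicated from fsl_net.h for a self-contained example. */
#define NH_FLD_VLAN_VPRI	(1)
#define NH_FLD_VLAN_CFI		(NH_FLD_VLAN_VPRI << 1)
#define NH_FLD_VLAN_VID		(NH_FLD_VLAN_VPRI << 2)
#define NH_FLD_VLAN_TCI		(NH_FLD_VLAN_VPRI | NH_FLD_VLAN_CFI | NH_FLD_VLAN_VID)

#define NH_IEEE8021Q_ETYPE 0x8100
#define NH_IEEE8021Q_HDR(etype, pcp, dei, vlan_id) \
	((((uint32_t)(etype & 0xFFFF)) << 16) | \
	 (((uint32_t)(pcp & 0x07)) << 13) | \
	 (((uint32_t)(dei & 0x01)) << 12) | \
	 (((uint32_t)(vlan_id & 0xFFF))))

int main(void)
{
	/* Field selectors are one-hot bits; ORing them builds a field set. */
	printf("VLAN TCI field mask = 0x%x\n", NH_FLD_VLAN_TCI);	/* 0x7 */

	/* 802.1Q tag word for PCP 5, DEI 0, VLAN 100. */
	printf("8021Q header = 0x%08x\n",
	       NH_IEEE8021Q_HDR(NH_IEEE8021Q_ETYPE, 5, 0, 100));	/* 0x8100a064 */
	return 0;
}
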
+diff --git a/drivers/net/dpaa2/mc/mc_sys.c b/drivers/net/dpaa2/mc/mc_sys.c
+new file mode 100644
+index 0000000..0a88cad
+--- /dev/null
++++ b/drivers/net/dpaa2/mc/mc_sys.c
+@@ -0,0 +1,129 @@
++/* Copyright 2013-2015 Freescale Semiconductor Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of the above-listed copyright holders nor the
++ * names of any contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
++ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
++ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
++ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
++ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
++ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
++ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
++ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
++ * POSSIBILITY OF SUCH DAMAGE.
++ */
++#include <fsl_mc_sys.h>
++#include <fsl_mc_cmd.h>
++
++/* The ODP framework uses the MC portal in shared mode. The following
++ * changes, which introduce locks, must be preserved while merging
++ * the FLIB.
++ */
++
++/**
++* The mc_spinlock_t type.
++*/
++typedef struct {
++ volatile int locked; /**< lock status 0 = unlocked, 1 = locked */
++} mc_spinlock_t;
++
++/**
++* A static spinlock initializer.
++*/
++static mc_spinlock_t mc_portal_lock = { 0 };
++
++static inline void mc_pause(void) {}
++
++static inline void mc_spinlock_lock(mc_spinlock_t *sl)
++{
++ while (__sync_lock_test_and_set(&sl->locked, 1))
++ while (sl->locked)
++ mc_pause();
++}
++
++static inline void mc_spinlock_unlock(mc_spinlock_t *sl)
++{
++ __sync_lock_release(&sl->locked);
++}
++
++
++static int mc_status_to_error(enum mc_cmd_status status)
++{
++ switch (status) {
++ case MC_CMD_STATUS_OK:
++ return 0;
++ case MC_CMD_STATUS_AUTH_ERR:
++ return -EACCES; /* Token error */
++ case MC_CMD_STATUS_NO_PRIVILEGE:
++ return -EPERM; /* Permission denied */
++ case MC_CMD_STATUS_DMA_ERR:
++ return -EIO; /* Input/Output error */
++ case MC_CMD_STATUS_CONFIG_ERR:
++ return -EINVAL; /* Device not configured */
++ case MC_CMD_STATUS_TIMEOUT:
++ return -ETIMEDOUT; /* Operation timed out */
++ case MC_CMD_STATUS_NO_RESOURCE:
++ return -ENAVAIL; /* Resource temporarily unavailable */
++ case MC_CMD_STATUS_NO_MEMORY:
++ return -ENOMEM; /* Cannot allocate memory */
++ case MC_CMD_STATUS_BUSY:
++ return -EBUSY; /* Device busy */
++ case MC_CMD_STATUS_UNSUPPORTED_OP:
++ return -ENOTSUP; /* Operation not supported by device */
++ case MC_CMD_STATUS_INVALID_STATE:
++ return -ENODEV; /* Invalid device state */
++ default:
++ break;
++ }
++
++ /* Not expected to reach here */
++ return -EINVAL;
++}
++
++int mc_send_command(struct fsl_mc_io *mc_io, struct mc_command *cmd)
++{
++ enum mc_cmd_status status;
++
++ if (!mc_io || !mc_io->regs)
++ return -EACCES;
++
++ /* --- Call lock function here in case portal is shared --- */
++ mc_spinlock_lock(&mc_portal_lock);
++
++ mc_write_command(mc_io->regs, cmd);
++
++ /* Spin until status changes */
++ do {
++ status = MC_CMD_HDR_READ_STATUS(ioread64(mc_io->regs));
++
++ /* --- Call wait function here to prevent blocking ---
++ * Change the loop condition accordingly to exit on timeout.
++ */
++ } while (status == MC_CMD_STATUS_READY);
++
++ /* Read the response back into the command buffer */
++ mc_read_response(mc_io->regs, cmd);
++
++ /* --- Call unlock function here in case portal is shared --- */
++ mc_spinlock_unlock(&mc_portal_lock);
++
++ return mc_status_to_error(status);
++}
++
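++/* A minimal sketch of the expected calling pattern, assuming the usual
++ * flib-style command encoding; the command id and the encode/decode
++ * helpers named below are placeholders rather than real flib symbols:
++ *
++ *	struct mc_command cmd = { 0 };
++ *	int err;
++ *
++ *	cmd.header = example_encode_cmd_header(EXAMPLE_CMDID, token);
++ *	example_encode_cmd_params(&cmd, obj_id);
++ *	err = mc_send_command(mc_io, &cmd);
++ *	if (!err)
++ *		example_decode_response(&cmd);
++ *
++ * Every call goes through mc_portal_lock above, so a single MC portal can
++ * safely be shared between callers.
++ */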
+diff --git a/drivers/net/dpaa2/qbman/driver/qbman_debug.c b/drivers/net/dpaa2/qbman/driver/qbman_debug.c
+new file mode 100644
+index 0000000..e205681
+--- /dev/null
++++ b/drivers/net/dpaa2/qbman/driver/qbman_debug.c
+@@ -0,0 +1,926 @@
++/* Copyright (C) 2015 Freescale Semiconductor, Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of Freescale Semiconductor nor the
++ * names of its contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++#include "qbman_portal.h"
++#include "qbman_debug.h"
++#include <drivers/fsl_qbman_portal.h>
++
++/* QBMan portal management command code */
++#define QBMAN_BP_QUERY 0x32
++#define QBMAN_FQ_QUERY 0x44
++#define QBMAN_FQ_QUERY_NP 0x45
++#define QBMAN_WQ_QUERY 0x47
++#define QBMAN_CGR_QUERY 0x51
++#define QBMAN_WRED_QUERY 0x54
++#define QBMAN_CGR_STAT_QUERY 0x55
++#define QBMAN_CGR_STAT_QUERY_CLR 0x56
++
++enum qbman_attr_usage_e {
++ qbman_attr_usage_fq,
++ qbman_attr_usage_bpool,
++ qbman_attr_usage_cgr,
++ qbman_attr_usage_wqchan
++};
++
++struct int_qbman_attr {
++ uint32_t words[32];
++ enum qbman_attr_usage_e usage;
++};
++
++#define attr_type_set(a, e) \
++{ \
++ struct qbman_attr *__attr = a; \
++ enum qbman_attr_usage_e __usage = e; \
++ ((struct int_qbman_attr *)__attr)->usage = __usage; \
++}
++
++#define ATTR32(d) (&(d)->dont_manipulate_directly[0])
++#define ATTR32_1(d) (&(d)->dont_manipulate_directly[16])
++
++static struct qb_attr_code code_bp_bpid = QB_CODE(0, 16, 16);
++static struct qb_attr_code code_bp_bdi = QB_CODE(1, 16, 1);
++static struct qb_attr_code code_bp_va = QB_CODE(1, 17, 1);
++static struct qb_attr_code code_bp_wae = QB_CODE(1, 18, 1);
++static struct qb_attr_code code_bp_swdet = QB_CODE(4, 0, 16);
++static struct qb_attr_code code_bp_swdxt = QB_CODE(4, 16, 16);
++static struct qb_attr_code code_bp_hwdet = QB_CODE(5, 0, 16);
++static struct qb_attr_code code_bp_hwdxt = QB_CODE(5, 16, 16);
++static struct qb_attr_code code_bp_swset = QB_CODE(6, 0, 16);
++static struct qb_attr_code code_bp_swsxt = QB_CODE(6, 16, 16);
++static struct qb_attr_code code_bp_vbpid = QB_CODE(7, 0, 14);
++static struct qb_attr_code code_bp_icid = QB_CODE(7, 16, 15);
++static struct qb_attr_code code_bp_pl = QB_CODE(7, 31, 1);
++static struct qb_attr_code code_bp_bpscn_addr_lo = QB_CODE(8, 0, 32);
++static struct qb_attr_code code_bp_bpscn_addr_hi = QB_CODE(9, 0, 32);
++static struct qb_attr_code code_bp_bpscn_ctx_lo = QB_CODE(10, 0, 32);
++static struct qb_attr_code code_bp_bpscn_ctx_hi = QB_CODE(11, 0, 32);
++static struct qb_attr_code code_bp_hw_targ = QB_CODE(12, 0, 16);
++static struct qb_attr_code code_bp_state = QB_CODE(1, 24, 3);
++static struct qb_attr_code code_bp_fill = QB_CODE(2, 0, 32);
++static struct qb_attr_code code_bp_hdptr = QB_CODE(3, 0, 32);
++static struct qb_attr_code code_bp_sdcnt = QB_CODE(13, 0, 8);
++static struct qb_attr_code code_bp_hdcnt = QB_CODE(13, 8, 8);
++static struct qb_attr_code code_bp_sscnt = QB_CODE(13, 16, 8);
++
++static void qbman_bp_attr_clear(struct qbman_attr *a)
++{
++ memset(a, 0, sizeof(*a));
++ attr_type_set(a, qbman_attr_usage_bpool);
++}
++
++int qbman_bp_query(struct qbman_swp *s, uint32_t bpid,
++ struct qbman_attr *a)
++{
++ uint32_t *p;
++ uint32_t rslt;
++ uint32_t *attr = ATTR32(a);
++
++ qbman_bp_attr_clear(a);
++
++ /* Start the management command */
++ p = qbman_swp_mc_start(s);
++ if (!p)
++ return -EBUSY;
++
++ /* Encode the caller-provided attributes */
++ qb_attr_code_encode(&code_bp_bpid, p, bpid);
++
++ /* Complete the management command */
++ p = qbman_swp_mc_complete(s, p, p[0] | QBMAN_BP_QUERY);
++
++ /* Decode the outcome */
++ rslt = qb_attr_code_decode(&code_generic_rslt, p);
++ BUG_ON(qb_attr_code_decode(&code_generic_verb, p) != QBMAN_BP_QUERY);
++
++ /* Determine success or failure */
++ if (unlikely(rslt != QBMAN_MC_RSLT_OK)) {
++ pr_err("Query of BPID 0x%x failed, code=0x%02x\n", bpid, rslt);
++ return -EIO;
++ }
++
++ /* For the query, word[0] of the result contains only the
++ * verb/rslt fields, so skip word[0].
++ */
++ word_copy(&attr[1], &p[1], 15);
++ return 0;
++}
++
++void qbman_bp_attr_get_bdi(struct qbman_attr *a, int *bdi, int *va, int *wae)
++{
++ uint32_t *p = ATTR32(a);
++
++ *bdi = !!qb_attr_code_decode(&code_bp_bdi, p);
++ *va = !!qb_attr_code_decode(&code_bp_va, p);
++ *wae = !!qb_attr_code_decode(&code_bp_wae, p);
++}
++
++static uint32_t qbman_bp_thresh_to_value(uint32_t val)
++{
++ return (val & 0xff) << ((val & 0xf00) >> 8);
++}
++
++void qbman_bp_attr_get_swdet(struct qbman_attr *a, uint32_t *swdet)
++{
++ uint32_t *p = ATTR32(a);
++
++ *swdet = qbman_bp_thresh_to_value(qb_attr_code_decode(&code_bp_swdet,
++ p));
++}
++
++void qbman_bp_attr_get_swdxt(struct qbman_attr *a, uint32_t *swdxt)
++{
++ uint32_t *p = ATTR32(a);
++
++ *swdxt = qbman_bp_thresh_to_value(qb_attr_code_decode(&code_bp_swdxt,
++ p));
++}
++
++void qbman_bp_attr_get_hwdet(struct qbman_attr *a, uint32_t *hwdet)
++{
++ uint32_t *p = ATTR32(a);
++
++ *hwdet = qbman_bp_thresh_to_value(qb_attr_code_decode(&code_bp_hwdet,
++ p));
++}
++
++void qbman_bp_attr_get_hwdxt(struct qbman_attr *a, uint32_t *hwdxt)
++{
++ uint32_t *p = ATTR32(a);
++
++ *hwdxt = qbman_bp_thresh_to_value(qb_attr_code_decode(&code_bp_hwdxt,
++ p));
++}
++
++void qbman_bp_attr_get_swset(struct qbman_attr *a, uint32_t *swset)
++{
++ uint32_t *p = ATTR32(a);
++
++ *swset = qbman_bp_thresh_to_value(qb_attr_code_decode(&code_bp_swset,
++ p));
++}
++
++void qbman_bp_attr_get_swsxt(struct qbman_attr *a, uint32_t *swsxt)
++{
++ uint32_t *p = ATTR32(a);
++
++ *swsxt = qbman_bp_thresh_to_value(qb_attr_code_decode(&code_bp_swsxt,
++ p));
++}
++
++void qbman_bp_attr_get_vbpid(struct qbman_attr *a, uint32_t *vbpid)
++{
++ uint32_t *p = ATTR32(a);
++
++ *vbpid = qb_attr_code_decode(&code_bp_vbpid, p);
++}
++
++void qbman_bp_attr_get_icid(struct qbman_attr *a, uint32_t *icid, int *pl)
++{
++ uint32_t *p = ATTR32(a);
++
++ *icid = qb_attr_code_decode(&code_bp_icid, p);
++ *pl = !!qb_attr_code_decode(&code_bp_pl, p);
++}
++
++void qbman_bp_attr_get_bpscn_addr(struct qbman_attr *a, uint64_t *bpscn_addr)
++{
++ uint32_t *p = ATTR32(a);
++
++ *bpscn_addr = ((uint64_t)qb_attr_code_decode(&code_bp_bpscn_addr_hi,
++ p) << 32) |
++ (uint64_t)qb_attr_code_decode(&code_bp_bpscn_addr_lo,
++ p);
++}
++
++void qbman_bp_attr_get_bpscn_ctx(struct qbman_attr *a, uint64_t *bpscn_ctx)
++{
++ uint32_t *p = ATTR32(a);
++
++ *bpscn_ctx = ((uint64_t)qb_attr_code_decode(&code_bp_bpscn_ctx_hi, p)
++ << 32) |
++ (uint64_t)qb_attr_code_decode(&code_bp_bpscn_ctx_lo,
++ p);
++}
++
++void qbman_bp_attr_get_hw_targ(struct qbman_attr *a, uint32_t *hw_targ)
++{
++ uint32_t *p = ATTR32(a);
++
++ *hw_targ = qb_attr_code_decode(&code_bp_hw_targ, p);
++}
++
++int qbman_bp_info_has_free_bufs(struct qbman_attr *a)
++{
++ uint32_t *p = ATTR32(a);
++
++ return !(int)(qb_attr_code_decode(&code_bp_state, p) & 0x1);
++}
++
++int qbman_bp_info_is_depleted(struct qbman_attr *a)
++{
++ uint32_t *p = ATTR32(a);
++
++ return (int)(qb_attr_code_decode(&code_bp_state, p) & 0x2);
++}
++
++int qbman_bp_info_is_surplus(struct qbman_attr *a)
++{
++ uint32_t *p = ATTR32(a);
++
++ return (int)(qb_attr_code_decode(&code_bp_state, p) & 0x4);
++}
++
++uint32_t qbman_bp_info_num_free_bufs(struct qbman_attr *a)
++{
++ uint32_t *p = ATTR32(a);
++
++ return qb_attr_code_decode(&code_bp_fill, p);
++}
++
++uint32_t qbman_bp_info_hdptr(struct qbman_attr *a)
++{
++ uint32_t *p = ATTR32(a);
++
++ return qb_attr_code_decode(&code_bp_hdptr, p);
++}
++
++uint32_t qbman_bp_info_sdcnt(struct qbman_attr *a)
++{
++ uint32_t *p = ATTR32(a);
++
++ return qb_attr_code_decode(&code_bp_sdcnt, p);
++}
++
++uint32_t qbman_bp_info_hdcnt(struct qbman_attr *a)
++{
++ uint32_t *p = ATTR32(a);
++
++ return qb_attr_code_decode(&code_bp_hdcnt, p);
++}
++
++uint32_t qbman_bp_info_sscnt(struct qbman_attr *a)
++{
++ uint32_t *p = ATTR32(a);
++
++ return qb_attr_code_decode(&code_bp_sscnt, p);
++}
++
++static struct qb_attr_code code_fq_fqid = QB_CODE(1, 0, 24);
++static struct qb_attr_code code_fq_cgrid = QB_CODE(2, 16, 16);
++static struct qb_attr_code code_fq_destwq = QB_CODE(3, 0, 15);
++static struct qb_attr_code code_fq_fqctrl = QB_CODE(3, 24, 8);
++static struct qb_attr_code code_fq_icscred = QB_CODE(4, 0, 15);
++static struct qb_attr_code code_fq_tdthresh = QB_CODE(4, 16, 13);
++static struct qb_attr_code code_fq_oa_len = QB_CODE(5, 0, 12);
++static struct qb_attr_code code_fq_oa_ics = QB_CODE(5, 14, 1);
++static struct qb_attr_code code_fq_oa_cgr = QB_CODE(5, 15, 1);
++static struct qb_attr_code code_fq_mctl_bdi = QB_CODE(5, 24, 1);
++static struct qb_attr_code code_fq_mctl_ff = QB_CODE(5, 25, 1);
++static struct qb_attr_code code_fq_mctl_va = QB_CODE(5, 26, 1);
++static struct qb_attr_code code_fq_mctl_ps = QB_CODE(5, 27, 1);
++static struct qb_attr_code code_fq_ctx_lower32 = QB_CODE(6, 0, 32);
++static struct qb_attr_code code_fq_ctx_upper32 = QB_CODE(7, 0, 32);
++static struct qb_attr_code code_fq_icid = QB_CODE(8, 0, 15);
++static struct qb_attr_code code_fq_pl = QB_CODE(8, 15, 1);
++static struct qb_attr_code code_fq_vfqid = QB_CODE(9, 0, 24);
++static struct qb_attr_code code_fq_erfqid = QB_CODE(10, 0, 24);
++
++static void qbman_fq_attr_clear(struct qbman_attr *a)
++{
++ memset(a, 0, sizeof(*a));
++ attr_type_set(a, qbman_attr_usage_fq);
++}
++
++/* FQ query function for programmable fields */
++int qbman_fq_query(struct qbman_swp *s, uint32_t fqid, struct qbman_attr *desc)
++{
++ uint32_t *p;
++ uint32_t rslt;
++ uint32_t *d = ATTR32(desc);
++
++ qbman_fq_attr_clear(desc);
++
++ p = qbman_swp_mc_start(s);
++ if (!p)
++ return -EBUSY;
++ qb_attr_code_encode(&code_fq_fqid, p, fqid);
++ p = qbman_swp_mc_complete(s, p, QBMAN_FQ_QUERY);
++
++ /* Decode the outcome */
++ rslt = qb_attr_code_decode(&code_generic_rslt, p);
++ BUG_ON(qb_attr_code_decode(&code_generic_verb, p) != QBMAN_FQ_QUERY);
++
++ /* Determine success or failure */
++ if (unlikely(rslt != QBMAN_MC_RSLT_OK)) {
++ pr_err("Query of FQID 0x%x failed, code=0x%02x\n",
++ fqid, rslt);
++ return -EIO;
++ }
++ /* For the configure, word[0] of the command contains only the WE-mask.
++ * For the query, word[0] of the result contains only the verb/rslt
++ * fields. Skip word[0] in the latter case. */
++ word_copy(&d[1], &p[1], 15);
++ return 0;
++}
++
++void qbman_fq_attr_get_fqctrl(struct qbman_attr *d, uint32_t *fqctrl)
++{
++ uint32_t *p = ATTR32(d);
++
++ *fqctrl = qb_attr_code_decode(&code_fq_fqctrl, p);
++}
++
++void qbman_fq_attr_get_cgrid(struct qbman_attr *d, uint32_t *cgrid)
++{
++ uint32_t *p = ATTR32(d);
++
++ *cgrid = qb_attr_code_decode(&code_fq_cgrid, p);
++}
++
++void qbman_fq_attr_get_destwq(struct qbman_attr *d, uint32_t *destwq)
++{
++ uint32_t *p = ATTR32(d);
++
++ *destwq = qb_attr_code_decode(&code_fq_destwq, p);
++}
++
++void qbman_fq_attr_get_icscred(struct qbman_attr *d, uint32_t *icscred)
++{
++ uint32_t *p = ATTR32(d);
++
++ *icscred = qb_attr_code_decode(&code_fq_icscred, p);
++}
++
++static struct qb_attr_code code_tdthresh_exp = QB_CODE(0, 0, 5);
++static struct qb_attr_code code_tdthresh_mant = QB_CODE(0, 5, 8);
++static uint32_t qbman_thresh_to_value(uint32_t val)
++{
++ uint32_t m, e;
++
++ m = qb_attr_code_decode(&code_tdthresh_mant, &val);
++ e = qb_attr_code_decode(&code_tdthresh_exp, &val);
++ return m << e;
++}
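++
++/* A worked example of the mantissa/exponent encoding used by these 13-bit
++ * threshold fields: the decoded value is (mantissa << exponent), so an
++ * encoded field of ((100 << 5) | 3) == 3203 decodes to (100 << 3) == 800.
++ */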
++
++void qbman_fq_attr_get_tdthresh(struct qbman_attr *d, uint32_t *tdthresh)
++{
++ uint32_t *p = ATTR32(d);
++
++ *tdthresh = qbman_thresh_to_value(qb_attr_code_decode(&code_fq_tdthresh,
++ p));
++}
++
++void qbman_fq_attr_get_oa(struct qbman_attr *d,
++ int *oa_ics, int *oa_cgr, int32_t *oa_len)
++{
++ uint32_t *p = ATTR32(d);
++
++ *oa_ics = !!qb_attr_code_decode(&code_fq_oa_ics, p);
++ *oa_cgr = !!qb_attr_code_decode(&code_fq_oa_cgr, p);
++ *oa_len = qb_attr_code_makesigned(&code_fq_oa_len,
++ qb_attr_code_decode(&code_fq_oa_len, p));
++}
++
++void qbman_fq_attr_get_mctl(struct qbman_attr *d,
++ int *bdi, int *ff, int *va, int *ps)
++{
++ uint32_t *p = ATTR32(d);
++
++ *bdi = !!qb_attr_code_decode(&code_fq_mctl_bdi, p);
++ *ff = !!qb_attr_code_decode(&code_fq_mctl_ff, p);
++ *va = !!qb_attr_code_decode(&code_fq_mctl_va, p);
++ *ps = !!qb_attr_code_decode(&code_fq_mctl_ps, p);
++}
++
++void qbman_fq_attr_get_ctx(struct qbman_attr *d, uint32_t *hi, uint32_t *lo)
++{
++ uint32_t *p = ATTR32(d);
++
++ *hi = qb_attr_code_decode(&code_fq_ctx_upper32, p);
++ *lo = qb_attr_code_decode(&code_fq_ctx_lower32, p);
++}
++
++void qbman_fq_attr_get_icid(struct qbman_attr *d, uint32_t *icid, int *pl)
++{
++ uint32_t *p = ATTR32(d);
++
++ *icid = qb_attr_code_decode(&code_fq_icid, p);
++ *pl = !!qb_attr_code_decode(&code_fq_pl, p);
++}
++
++void qbman_fq_attr_get_vfqid(struct qbman_attr *d, uint32_t *vfqid)
++{
++ uint32_t *p = ATTR32(d);
++
++ *vfqid = qb_attr_code_decode(&code_fq_vfqid, p);
++}
++
++void qbman_fq_attr_get_erfqid(struct qbman_attr *d, uint32_t *erfqid)
++{
++ uint32_t *p = ATTR32(d);
++
++ *erfqid = qb_attr_code_decode(&code_fq_erfqid, p);
++}
++
++/* Query FQ Non-Programmable Fields */
++static struct qb_attr_code code_fq_np_state = QB_CODE(0, 16, 3);
++static struct qb_attr_code code_fq_np_fe = QB_CODE(0, 19, 1);
++static struct qb_attr_code code_fq_np_x = QB_CODE(0, 20, 1);
++static struct qb_attr_code code_fq_np_r = QB_CODE(0, 21, 1);
++static struct qb_attr_code code_fq_np_oe = QB_CODE(0, 22, 1);
++static struct qb_attr_code code_fq_np_frm_cnt = QB_CODE(6, 0, 24);
++static struct qb_attr_code code_fq_np_byte_cnt = QB_CODE(7, 0, 32);
++
++int qbman_fq_query_state(struct qbman_swp *s, uint32_t fqid,
++ struct qbman_attr *state)
++{
++ uint32_t *p;
++ uint32_t rslt;
++ uint32_t *d = ATTR32(state);
++
++ qbman_fq_attr_clear(state);
++
++ p = qbman_swp_mc_start(s);
++ if (!p)
++ return -EBUSY;
++ qb_attr_code_encode(&code_fq_fqid, p, fqid);
++ p = qbman_swp_mc_complete(s, p, QBMAN_FQ_QUERY_NP);
++
++ /* Decode the outcome */
++ rslt = qb_attr_code_decode(&code_generic_rslt, p);
++ BUG_ON(qb_attr_code_decode(&code_generic_verb, p) != QBMAN_FQ_QUERY_NP);
++
++ /* Determine success or failure */
++ if (unlikely(rslt != QBMAN_MC_RSLT_OK)) {
++ pr_err("Query NP fields of FQID 0x%x failed, code=0x%02x\n",
++ fqid, rslt);
++ return -EIO;
++ }
++ word_copy(&d[0], &p[0], 16);
++ return 0;
++}
++
++uint32_t qbman_fq_state_schedstate(const struct qbman_attr *state)
++{
++ const uint32_t *p = ATTR32(state);
++
++ return qb_attr_code_decode(&code_fq_np_state, p);
++}
++
++int qbman_fq_state_force_eligible(const struct qbman_attr *state)
++{
++ const uint32_t *p = ATTR32(state);
++
++ return !!qb_attr_code_decode(&code_fq_np_fe, p);
++}
++
++int qbman_fq_state_xoff(const struct qbman_attr *state)
++{
++ const uint32_t *p = ATTR32(state);
++
++ return !!qb_attr_code_decode(&code_fq_np_x, p);
++}
++
++int qbman_fq_state_retirement_pending(const struct qbman_attr *state)
++{
++ const uint32_t *p = ATTR32(state);
++
++ return !!qb_attr_code_decode(&code_fq_np_r, p);
++}
++
++int qbman_fq_state_overflow_error(const struct qbman_attr *state)
++{
++ const uint32_t *p = ATTR32(state);
++
++ return !!qb_attr_code_decode(&code_fq_np_oe, p);
++}
++
++uint32_t qbman_fq_state_frame_count(const struct qbman_attr *state)
++{
++ const uint32_t *p = ATTR32(state);
++
++ return qb_attr_code_decode(&code_fq_np_frm_cnt, p);
++}
++
++uint32_t qbman_fq_state_byte_count(const struct qbman_attr *state)
++{
++ const uint32_t *p = ATTR32(state);
++
++ return qb_attr_code_decode(&code_fq_np_byte_cnt, p);
++}
++
++/* Query CGR */
++static struct qb_attr_code code_cgr_cgid = QB_CODE(0, 16, 16);
++static struct qb_attr_code code_cgr_cscn_wq_en_enter = QB_CODE(2, 0, 1);
++static struct qb_attr_code code_cgr_cscn_wq_en_exit = QB_CODE(2, 1, 1);
++static struct qb_attr_code code_cgr_cscn_wq_icd = QB_CODE(2, 2, 1);
++static struct qb_attr_code code_cgr_mode = QB_CODE(3, 16, 2);
++static struct qb_attr_code code_cgr_rej_cnt_mode = QB_CODE(3, 18, 1);
++static struct qb_attr_code code_cgr_cscn_bdi = QB_CODE(3, 19, 1);
++static struct qb_attr_code code_cgr_cscn_wr_en_enter = QB_CODE(3, 24, 1);
++static struct qb_attr_code code_cgr_cscn_wr_en_exit = QB_CODE(3, 25, 1);
++static struct qb_attr_code code_cgr_cg_wr_ae = QB_CODE(3, 26, 1);
++static struct qb_attr_code code_cgr_cscn_dcp_en = QB_CODE(3, 27, 1);
++static struct qb_attr_code code_cgr_cg_wr_va = QB_CODE(3, 28, 1);
++static struct qb_attr_code code_cgr_i_cnt_wr_en = QB_CODE(4, 0, 1);
++static struct qb_attr_code code_cgr_i_cnt_wr_bnd = QB_CODE(4, 1, 5);
++static struct qb_attr_code code_cgr_td_en = QB_CODE(4, 8, 1);
++static struct qb_attr_code code_cgr_cs_thres = QB_CODE(4, 16, 13);
++static struct qb_attr_code code_cgr_cs_thres_x = QB_CODE(5, 0, 13);
++static struct qb_attr_code code_cgr_td_thres = QB_CODE(5, 16, 13);
++static struct qb_attr_code code_cgr_cscn_tdcp = QB_CODE(6, 0, 16);
++static struct qb_attr_code code_cgr_cscn_wqid = QB_CODE(6, 16, 16);
++static struct qb_attr_code code_cgr_cscn_vcgid = QB_CODE(7, 0, 16);
++static struct qb_attr_code code_cgr_cg_icid = QB_CODE(7, 16, 15);
++static struct qb_attr_code code_cgr_cg_pl = QB_CODE(7, 31, 1);
++static struct qb_attr_code code_cgr_cg_wr_addr_lo = QB_CODE(8, 0, 32);
++static struct qb_attr_code code_cgr_cg_wr_addr_hi = QB_CODE(9, 0, 32);
++static struct qb_attr_code code_cgr_cscn_ctx_lo = QB_CODE(10, 0, 32);
++static struct qb_attr_code code_cgr_cscn_ctx_hi = QB_CODE(11, 0, 32);
++
++static void qbman_cgr_attr_clear(struct qbman_attr *a)
++{
++ memset(a, 0, sizeof(*a));
++ attr_type_set(a, qbman_attr_usage_cgr);
++}
++
++int qbman_cgr_query(struct qbman_swp *s, uint32_t cgid, struct qbman_attr *attr)
++{
++ uint32_t *p;
++ uint32_t verb, rslt;
++ uint32_t *d[2];
++ int i;
++ uint32_t query_verb;
++
++ d[0] = ATTR32(attr);
++ d[1] = ATTR32_1(attr);
++
++ qbman_cgr_attr_clear(attr);
++
++ for (i = 0; i < 2; i++) {
++ p = qbman_swp_mc_start(s);
++ if (!p)
++ return -EBUSY;
++ query_verb = i ? QBMAN_WRED_QUERY : QBMAN_CGR_QUERY;
++
++ qb_attr_code_encode(&code_cgr_cgid, p, cgid);
++ p = qbman_swp_mc_complete(s, p, p[0] | query_verb);
++
++ /* Decode the outcome */
++ verb = qb_attr_code_decode(&code_generic_verb, p);
++ rslt = qb_attr_code_decode(&code_generic_rslt, p);
++ BUG_ON(verb != query_verb);
++
++ /* Determine success or failure */
++ if (unlikely(rslt != QBMAN_MC_RSLT_OK)) {
++ pr_err("Query CGID 0x%x failed,", cgid);
++ pr_err(" verb=0x%02x, code=0x%02x\n", verb, rslt);
++ return -EIO;
++ }
++ /* For the configure, word[0] of the command contains only the
++ * verb/cgid. For the query, word[0] of the result contains
++ * only the verb/rslt fields. Skip word[0] in the latter case.
++ */
++ word_copy(&d[i][1], &p[1], 15);
++ }
++ return 0;
++}
++
++void qbman_cgr_attr_get_ctl1(struct qbman_attr *d, int *cscn_wq_en_enter,
++ int *cscn_wq_en_exit, int *cscn_wq_icd)
++{
++ uint32_t *p = ATTR32(d);
++ *cscn_wq_en_enter = !!qb_attr_code_decode(&code_cgr_cscn_wq_en_enter,
++ p);
++ *cscn_wq_en_exit = !!qb_attr_code_decode(&code_cgr_cscn_wq_en_exit, p);
++ *cscn_wq_icd = !!qb_attr_code_decode(&code_cgr_cscn_wq_icd, p);
++}
++
++void qbman_cgr_attr_get_mode(struct qbman_attr *d, uint32_t *mode,
++ int *rej_cnt_mode, int *cscn_bdi)
++{
++ uint32_t *p = ATTR32(d);
++ *mode = qb_attr_code_decode(&code_cgr_mode, p);
++ *rej_cnt_mode = !!qb_attr_code_decode(&code_cgr_rej_cnt_mode, p);
++ *cscn_bdi = !!qb_attr_code_decode(&code_cgr_cscn_bdi, p);
++}
++
++void qbman_cgr_attr_get_ctl2(struct qbman_attr *d, int *cscn_wr_en_enter,
++ int *cscn_wr_en_exit, int *cg_wr_ae,
++ int *cscn_dcp_en, int *cg_wr_va)
++{
++ uint32_t *p = ATTR32(d);
++ *cscn_wr_en_enter = !!qb_attr_code_decode(&code_cgr_cscn_wr_en_enter,
++ p);
++ *cscn_wr_en_exit = !!qb_attr_code_decode(&code_cgr_cscn_wr_en_exit, p);
++ *cg_wr_ae = !!qb_attr_code_decode(&code_cgr_cg_wr_ae, p);
++ *cscn_dcp_en = !!qb_attr_code_decode(&code_cgr_cscn_dcp_en, p);
++ *cg_wr_va = !!qb_attr_code_decode(&code_cgr_cg_wr_va, p);
++}
++
++void qbman_cgr_attr_get_iwc(struct qbman_attr *d, int *i_cnt_wr_en,
++ uint32_t *i_cnt_wr_bnd)
++{
++ uint32_t *p = ATTR32(d);
++ *i_cnt_wr_en = !!qb_attr_code_decode(&code_cgr_i_cnt_wr_en, p);
++ *i_cnt_wr_bnd = qb_attr_code_decode(&code_cgr_i_cnt_wr_bnd, p);
++}
++
++void qbman_cgr_attr_get_tdc(struct qbman_attr *d, int *td_en)
++{
++ uint32_t *p = ATTR32(d);
++ *td_en = !!qb_attr_code_decode(&code_cgr_td_en, p);
++}
++
++void qbman_cgr_attr_get_cs_thres(struct qbman_attr *d, uint32_t *cs_thres)
++{
++ uint32_t *p = ATTR32(d);
++ *cs_thres = qbman_thresh_to_value(qb_attr_code_decode(
++ &code_cgr_cs_thres, p));
++}
++
++void qbman_cgr_attr_get_cs_thres_x(struct qbman_attr *d,
++ uint32_t *cs_thres_x)
++{
++ uint32_t *p = ATTR32(d);
++ *cs_thres_x = qbman_thresh_to_value(qb_attr_code_decode(
++ &code_cgr_cs_thres_x, p));
++}
++
++void qbman_cgr_attr_get_td_thres(struct qbman_attr *d, uint32_t *td_thres)
++{
++ uint32_t *p = ATTR32(d);
++ *td_thres = qbman_thresh_to_value(qb_attr_code_decode(
++ &code_cgr_td_thres, p));
++}
++
++void qbman_cgr_attr_get_cscn_tdcp(struct qbman_attr *d, uint32_t *cscn_tdcp)
++{
++ uint32_t *p = ATTR32(d);
++ *cscn_tdcp = qb_attr_code_decode(&code_cgr_cscn_tdcp, p);
++}
++
++void qbman_cgr_attr_get_cscn_wqid(struct qbman_attr *d, uint32_t *cscn_wqid)
++{
++ uint32_t *p = ATTR32(d);
++ *cscn_wqid = qb_attr_code_decode(&code_cgr_cscn_wqid, p);
++}
++
++void qbman_cgr_attr_get_cscn_vcgid(struct qbman_attr *d,
++ uint32_t *cscn_vcgid)
++{
++ uint32_t *p = ATTR32(d);
++ *cscn_vcgid = qb_attr_code_decode(&code_cgr_cscn_vcgid, p);
++}
++
++void qbman_cgr_attr_get_cg_icid(struct qbman_attr *d, uint32_t *icid,
++ int *pl)
++{
++ uint32_t *p = ATTR32(d);
++ *icid = qb_attr_code_decode(&code_cgr_cg_icid, p);
++ *pl = !!qb_attr_code_decode(&code_cgr_cg_pl, p);
++}
++
++void qbman_cgr_attr_get_cg_wr_addr(struct qbman_attr *d,
++ uint64_t *cg_wr_addr)
++{
++ uint32_t *p = ATTR32(d);
++ *cg_wr_addr = ((uint64_t)qb_attr_code_decode(&code_cgr_cg_wr_addr_hi,
++ p) << 32) |
++ (uint64_t)qb_attr_code_decode(&code_cgr_cg_wr_addr_lo,
++ p);
++}
++
++void qbman_cgr_attr_get_cscn_ctx(struct qbman_attr *d, uint64_t *cscn_ctx)
++{
++ uint32_t *p = ATTR32(d);
++ *cscn_ctx = ((uint64_t)qb_attr_code_decode(&code_cgr_cscn_ctx_hi, p)
++ << 32) |
++ (uint64_t)qb_attr_code_decode(&code_cgr_cscn_ctx_lo, p);
++}
++
++#define WRED_EDP_WORD(n) (18 + (n) / 4)
++#define WRED_EDP_OFFSET(n) (8 * ((n) % 4))
++#define WRED_PARM_DP_WORD(n) ((n) + 20)
++#define WRED_WE_EDP(n) (16 + (n) * 2)
++#define WRED_WE_PARM_DP(n) (17 + (n) * 2)
++void qbman_cgr_attr_wred_get_edp(struct qbman_attr *d, uint32_t idx,
++ int *edp)
++{
++ uint32_t *p = ATTR32(d);
++ struct qb_attr_code code_wred_edp = QB_CODE(WRED_EDP_WORD(idx),
++ WRED_EDP_OFFSET(idx), 8);
++ *edp = (int)qb_attr_code_decode(&code_wred_edp, p);
++}
++
++void qbman_cgr_attr_wred_dp_decompose(uint32_t dp, uint64_t *minth,
++ uint64_t *maxth, uint8_t *maxp)
++{
++ uint8_t ma, mn, step_i, step_s, pn;
++
++ ma = (uint8_t)(dp >> 24);
++ mn = (uint8_t)(dp >> 19) & 0x1f;
++ step_i = (uint8_t)(dp >> 11);
++ step_s = (uint8_t)(dp >> 6) & 0x1f;
++ pn = (uint8_t)dp & 0x3f;
++
++ *maxp = (uint8_t)(((pn<<2) * 100)/256);
++
++ if (mn == 0)
++ *maxth = ma;
++ else
++ *maxth = ((ma+256) * (1<<(mn-1)));
++
++ if (step_s == 0)
++ *minth = *maxth - step_i;
++ else
++ *minth = *maxth - (256 + step_i) * (1<<(step_s - 1));
++}
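++
++/* A worked example of the decomposition above: a drop-probability word of
++ * 0x40085020 carries ma=64, mn=1, step_i=10, step_s=0 and pn=32, which
++ * gives maxth = (64 + 256) * (1 << 0) = 320, minth = 320 - 10 = 310 and
++ * maxp = ((32 << 2) * 100) / 256 = 50, i.e. a 50% maximum drop probability.
++ */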
++
++void qbman_cgr_attr_wred_get_parm_dp(struct qbman_attr *d, uint32_t idx,
++ uint32_t *dp)
++{
++ uint32_t *p = ATTR32(d);
++ struct qb_attr_code code_wred_parm_dp = QB_CODE(WRED_PARM_DP_WORD(idx),
++ 0, 8);
++ *dp = qb_attr_code_decode(&code_wred_parm_dp, p);
++}
++
++/* Query CGR/CCGR/CQ statistics */
++static struct qb_attr_code code_cgr_stat_ct = QB_CODE(4, 0, 32);
++static struct qb_attr_code code_cgr_stat_frame_cnt_lo = QB_CODE(4, 0, 32);
++static struct qb_attr_code code_cgr_stat_frame_cnt_hi = QB_CODE(5, 0, 8);
++static struct qb_attr_code code_cgr_stat_byte_cnt_lo = QB_CODE(6, 0, 32);
++static struct qb_attr_code code_cgr_stat_byte_cnt_hi = QB_CODE(7, 0, 16);
++static int qbman_cgr_statistics_query(struct qbman_swp *s, uint32_t cgid,
++ int clear, uint32_t command_type,
++ uint64_t *frame_cnt, uint64_t *byte_cnt)
++{
++ uint32_t *p;
++ uint32_t verb, rslt;
++ uint32_t query_verb;
++ uint32_t hi, lo;
++
++ p = qbman_swp_mc_start(s);
++ if (!p)
++ return -EBUSY;
++
++ qb_attr_code_encode(&code_cgr_cgid, p, cgid);
++ if (command_type < 2)
++ qb_attr_code_encode(&code_cgr_stat_ct, p, command_type);
++ query_verb = clear ?
++ QBMAN_CGR_STAT_QUERY_CLR : QBMAN_CGR_STAT_QUERY;
++ p = qbman_swp_mc_complete(s, p, p[0] | query_verb);
++
++ /* Decode the outcome */
++ verb = qb_attr_code_decode(&code_generic_verb, p);
++ rslt = qb_attr_code_decode(&code_generic_rslt, p);
++ BUG_ON(verb != query_verb);
++
++ /* Determine success or failure */
++ if (unlikely(rslt != QBMAN_MC_RSLT_OK)) {
++ pr_err("Query statistics of CGID 0x%x failed,", cgid);
++ pr_err(" verb=0x%02x code=0x%02x\n", verb, rslt);
++ return -EIO;
++ }
++
++	if (frame_cnt) {
++ hi = qb_attr_code_decode(&code_cgr_stat_frame_cnt_hi, p);
++ lo = qb_attr_code_decode(&code_cgr_stat_frame_cnt_lo, p);
++ *frame_cnt = ((uint64_t)hi << 32) | (uint64_t)lo;
++ }
++	if (byte_cnt) {
++ hi = qb_attr_code_decode(&code_cgr_stat_byte_cnt_hi, p);
++ lo = qb_attr_code_decode(&code_cgr_stat_byte_cnt_lo, p);
++ *byte_cnt = ((uint64_t)hi << 32) | (uint64_t)lo;
++ }
++
++ return 0;
++}
++
++int qbman_cgr_reject_statistics(struct qbman_swp *s, uint32_t cgid, int clear,
++ uint64_t *frame_cnt, uint64_t *byte_cnt)
++{
++ return qbman_cgr_statistics_query(s, cgid, clear, 0xff,
++ frame_cnt, byte_cnt);
++}
++
++int qbman_ccgr_reject_statistics(struct qbman_swp *s, uint32_t cgid, int clear,
++ uint64_t *frame_cnt, uint64_t *byte_cnt)
++{
++ return qbman_cgr_statistics_query(s, cgid, clear, 1,
++ frame_cnt, byte_cnt);
++}
++
++int qbman_cq_dequeue_statistics(struct qbman_swp *s, uint32_t cgid, int clear,
++ uint64_t *frame_cnt, uint64_t *byte_cnt)
++{
++ return qbman_cgr_statistics_query(s, cgid, clear, 0,
++ frame_cnt, byte_cnt);
++}
++
++/* WQ Chan Query */
++static struct qb_attr_code code_wqchan_chanid = QB_CODE(0, 16, 16);
++static struct qb_attr_code code_wqchan_cdan_ctx_lo = QB_CODE(2, 0, 32);
++static struct qb_attr_code code_wqchan_cdan_ctx_hi = QB_CODE(3, 0, 32);
++static struct qb_attr_code code_wqchan_cdan_wqid = QB_CODE(1, 16, 16);
++static struct qb_attr_code code_wqchan_ctrl = QB_CODE(1, 8, 8);
++
++static void qbman_wqchan_attr_clear(struct qbman_attr *a)
++{
++ memset(a, 0, sizeof(*a));
++ attr_type_set(a, qbman_attr_usage_wqchan);
++}
++
++int qbman_wqchan_query(struct qbman_swp *s, uint16_t chanid,
++ struct qbman_attr *a)
++{
++ uint32_t *p;
++ uint32_t rslt;
++ uint32_t *attr = ATTR32(a);
++
++ qbman_wqchan_attr_clear(a);
++
++ /* Start the management command */
++ p = qbman_swp_mc_start(s);
++ if (!p)
++ return -EBUSY;
++
++ /* Encode the caller-provided attributes */
++ qb_attr_code_encode(&code_wqchan_chanid, p, chanid);
++
++ /* Complete the management command */
++ p = qbman_swp_mc_complete(s, p, p[0] | QBMAN_WQ_QUERY);
++
++ /* Decode the outcome */
++ rslt = qb_attr_code_decode(&code_generic_rslt, p);
++	BUG_ON(qb_attr_code_decode(&code_generic_verb, p) != QBMAN_WQ_QUERY);
++
++ /* Determine success or failure */
++ if (unlikely(rslt != QBMAN_MC_RSLT_OK)) {
++ pr_err("Query of WQCHAN 0x%x failed, code=0x%02x\n",
++ chanid, rslt);
++ return -EIO;
++ }
++
++ /* For the query, word[0] of the result contains only the
++ * verb/rslt fields, so skip word[0].
++ */
++ word_copy(&attr[1], &p[1], 15);
++ return 0;
++}
++
++void qbman_wqchan_attr_get_wqlen(struct qbman_attr *attr, int wq, uint32_t *len)
++{
++ uint32_t *p = ATTR32(attr);
++	struct qb_attr_code code_wqchan_len = QB_CODE(wq + 8, 0, 24);
++ *len = qb_attr_code_decode(&code_wqchan_len, p);
++}
++
++void qbman_wqchan_attr_get_cdan_ctx(struct qbman_attr *attr, uint64_t *cdan_ctx)
++{
++ uint32_t lo, hi;
++ uint32_t *p = ATTR32(attr);
++
++ lo = qb_attr_code_decode(&code_wqchan_cdan_ctx_lo, p);
++ hi = qb_attr_code_decode(&code_wqchan_cdan_ctx_hi, p);
++ *cdan_ctx = ((uint64_t)hi << 32) | (uint64_t)lo;
++}
++
++void qbman_wqchan_attr_get_cdan_wqid(struct qbman_attr *attr,
++ uint16_t *cdan_wqid)
++{
++ uint32_t *p = ATTR32(attr);
++ *cdan_wqid = (uint16_t)qb_attr_code_decode(&code_wqchan_cdan_wqid, p);
++}
++
++void qbman_wqchan_attr_get_ctrl(struct qbman_attr *attr, uint8_t *ctrl)
++{
++ uint32_t *p = ATTR32(attr);
++ *ctrl = (uint8_t)qb_attr_code_decode(&code_wqchan_ctrl, p);
++}
++
++void qbman_wqchan_attr_get_chanid(struct qbman_attr *attr, uint16_t *chanid)
++{
++ uint32_t *p = ATTR32(attr);
++ *chanid = (uint16_t)qb_attr_code_decode(&code_wqchan_chanid, p);
++}
++
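++/* A minimal usage sketch for the query helpers above, assuming 's' is an
++ * initialised software portal and 'bpid' a valid buffer pool id:
++ *
++ *	struct qbman_attr attr;
++ *	uint32_t free_bufs;
++ *	int depleted;
++ *
++ *	if (!qbman_bp_query(s, bpid, &attr)) {
++ *		free_bufs = qbman_bp_info_num_free_bufs(&attr);
++ *		depleted = qbman_bp_info_is_depleted(&attr);
++ *	}
++ *
++ * The FQ, CGR and WQ-channel helpers follow the same query-then-getter
++ * pattern.
++ */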
+diff --git a/drivers/net/dpaa2/qbman/driver/qbman_debug.h b/drivers/net/dpaa2/qbman/driver/qbman_debug.h
+new file mode 100644
+index 0000000..8c89731
+--- /dev/null
++++ b/drivers/net/dpaa2/qbman/driver/qbman_debug.h
+@@ -0,0 +1,140 @@
++/* Copyright (C) 2015 Freescale Semiconductor, Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of Freescale Semiconductor nor the
++ * names of its contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++struct qbman_attr {
++ uint32_t dont_manipulate_directly[40];
++};
++
++/* Buffer pool query commands */
++int qbman_bp_query(struct qbman_swp *s, uint32_t bpid,
++ struct qbman_attr *a);
++void qbman_bp_attr_get_bdi(struct qbman_attr *a, int *bdi, int *va, int *wae);
++void qbman_bp_attr_get_swdet(struct qbman_attr *a, uint32_t *swdet);
++void qbman_bp_attr_get_swdxt(struct qbman_attr *a, uint32_t *swdxt);
++void qbman_bp_attr_get_hwdet(struct qbman_attr *a, uint32_t *hwdet);
++void qbman_bp_attr_get_hwdxt(struct qbman_attr *a, uint32_t *hwdxt);
++void qbman_bp_attr_get_swset(struct qbman_attr *a, uint32_t *swset);
++void qbman_bp_attr_get_swsxt(struct qbman_attr *a, uint32_t *swsxt);
++void qbman_bp_attr_get_vbpid(struct qbman_attr *a, uint32_t *vbpid);
++void qbman_bp_attr_get_icid(struct qbman_attr *a, uint32_t *icid, int *pl);
++void qbman_bp_attr_get_bpscn_addr(struct qbman_attr *a, uint64_t *bpscn_addr);
++void qbman_bp_attr_get_bpscn_ctx(struct qbman_attr *a, uint64_t *bpscn_ctx);
++void qbman_bp_attr_get_hw_targ(struct qbman_attr *a, uint32_t *hw_targ);
++int qbman_bp_info_has_free_bufs(struct qbman_attr *a);
++int qbman_bp_info_is_depleted(struct qbman_attr *a);
++int qbman_bp_info_is_surplus(struct qbman_attr *a);
++uint32_t qbman_bp_info_num_free_bufs(struct qbman_attr *a);
++uint32_t qbman_bp_info_hdptr(struct qbman_attr *a);
++uint32_t qbman_bp_info_sdcnt(struct qbman_attr *a);
++uint32_t qbman_bp_info_hdcnt(struct qbman_attr *a);
++uint32_t qbman_bp_info_sscnt(struct qbman_attr *a);
++
++/* FQ query function for programmable fields */
++int qbman_fq_query(struct qbman_swp *s, uint32_t fqid,
++ struct qbman_attr *desc);
++void qbman_fq_attr_get_fqctrl(struct qbman_attr *d, uint32_t *fqctrl);
++void qbman_fq_attr_get_cgrid(struct qbman_attr *d, uint32_t *cgrid);
++void qbman_fq_attr_get_destwq(struct qbman_attr *d, uint32_t *destwq);
++void qbman_fq_attr_get_icscred(struct qbman_attr *d, uint32_t *icscred);
++void qbman_fq_attr_get_tdthresh(struct qbman_attr *d, uint32_t *tdthresh);
++void qbman_fq_attr_get_oa(struct qbman_attr *d,
++ int *oa_ics, int *oa_cgr, int32_t *oa_len);
++void qbman_fq_attr_get_mctl(struct qbman_attr *d,
++ int *bdi, int *ff, int *va, int *ps);
++void qbman_fq_attr_get_ctx(struct qbman_attr *d, uint32_t *hi, uint32_t *lo);
++void qbman_fq_attr_get_icid(struct qbman_attr *d, uint32_t *icid, int *pl);
++void qbman_fq_attr_get_vfqid(struct qbman_attr *d, uint32_t *vfqid);
++void qbman_fq_attr_get_erfqid(struct qbman_attr *d, uint32_t *erfqid);
++
++/* FQ query command for non-programmable fields */
++enum qbman_fq_schedstate_e {
++ qbman_fq_schedstate_oos = 0,
++ qbman_fq_schedstate_retired,
++ qbman_fq_schedstate_tentatively_scheduled,
++ qbman_fq_schedstate_truly_scheduled,
++ qbman_fq_schedstate_parked,
++ qbman_fq_schedstate_held_active,
++};
++
++int qbman_fq_query_state(struct qbman_swp *s, uint32_t fqid,
++ struct qbman_attr *state);
++uint32_t qbman_fq_state_schedstate(const struct qbman_attr *state);
++int qbman_fq_state_force_eligible(const struct qbman_attr *state);
++int qbman_fq_state_xoff(const struct qbman_attr *state);
++int qbman_fq_state_retirement_pending(const struct qbman_attr *state);
++int qbman_fq_state_overflow_error(const struct qbman_attr *state);
++uint32_t qbman_fq_state_frame_count(const struct qbman_attr *state);
++uint32_t qbman_fq_state_byte_count(const struct qbman_attr *state);
++
++/* CGR query */
++int qbman_cgr_query(struct qbman_swp *s, uint32_t cgid,
++ struct qbman_attr *attr);
++void qbman_cgr_attr_get_ctl1(struct qbman_attr *d, int *cscn_wq_en_enter,
++ int *cscn_wq_en_exit, int *cscn_wq_icd);
++void qbman_cgr_attr_get_mode(struct qbman_attr *d, uint32_t *mode,
++ int *rej_cnt_mode, int *cscn_bdi);
++void qbman_cgr_attr_get_ctl2(struct qbman_attr *d, int *cscn_wr_en_enter,
++ int *cscn_wr_en_exit, int *cg_wr_ae,
++ int *cscn_dcp_en, int *cg_wr_va);
++void qbman_cgr_attr_get_iwc(struct qbman_attr *d, int *i_cnt_wr_en,
++ uint32_t *i_cnt_wr_bnd);
++void qbman_cgr_attr_get_tdc(struct qbman_attr *d, int *td_en);
++void qbman_cgr_attr_get_cs_thres(struct qbman_attr *d, uint32_t *cs_thres);
++void qbman_cgr_attr_get_cs_thres_x(struct qbman_attr *d,
++ uint32_t *cs_thres_x);
++void qbman_cgr_attr_get_td_thres(struct qbman_attr *d, uint32_t *td_thres);
++void qbman_cgr_attr_get_cscn_tdcp(struct qbman_attr *d, uint32_t *cscn_tdcp);
++void qbman_cgr_attr_get_cscn_wqid(struct qbman_attr *d, uint32_t *cscn_wqid);
++void qbman_cgr_attr_get_cscn_vcgid(struct qbman_attr *d,
++ uint32_t *cscn_vcgid);
++void qbman_cgr_attr_get_cg_icid(struct qbman_attr *d, uint32_t *icid,
++ int *pl);
++void qbman_cgr_attr_get_cg_wr_addr(struct qbman_attr *d,
++ uint64_t *cg_wr_addr);
++void qbman_cgr_attr_get_cscn_ctx(struct qbman_attr *d, uint64_t *cscn_ctx);
++void qbman_cgr_attr_wred_get_edp(struct qbman_attr *d, uint32_t idx,
++ int *edp);
++void qbman_cgr_attr_wred_dp_decompose(uint32_t dp, uint64_t *minth,
++ uint64_t *maxth, uint8_t *maxp);
++void qbman_cgr_attr_wred_get_parm_dp(struct qbman_attr *d, uint32_t idx,
++ uint32_t *dp);
++
++/* CGR/CCGR/CQ statistics query */
++int qbman_cgr_reject_statistics(struct qbman_swp *s, uint32_t cgid, int clear,
++ uint64_t *frame_cnt, uint64_t *byte_cnt);
++int qbman_ccgr_reject_statistics(struct qbman_swp *s, uint32_t cgid, int clear,
++ uint64_t *frame_cnt, uint64_t *byte_cnt);
++int qbman_cq_dequeue_statistics(struct qbman_swp *s, uint32_t cgid, int clear,
++ uint64_t *frame_cnt, uint64_t *byte_cnt);
++
++/* Query Work Queue Channel */
++int qbman_wqchan_query(struct qbman_swp *s, uint16_t chanid,
++ struct qbman_attr *attr);
++void qbman_wqchan_attr_get_wqlen(struct qbman_attr *attr, int wq, uint32_t *len);
++void qbman_wqchan_attr_get_cdan_ctx(struct qbman_attr *attr, uint64_t *cdan_ctx);
++void qbman_wqchan_attr_get_cdan_wqid(struct qbman_attr *attr,
++ uint16_t *cdan_wqid);
++void qbman_wqchan_attr_get_ctrl(struct qbman_attr *attr, uint8_t *ctrl);
++void qbman_wqchan_attr_get_chanid(struct qbman_attr *attr, uint16_t *chanid);
+diff --git a/drivers/net/dpaa2/qbman/driver/qbman_portal.c b/drivers/net/dpaa2/qbman/driver/qbman_portal.c
+new file mode 100644
+index 0000000..464f386
+--- /dev/null
++++ b/drivers/net/dpaa2/qbman/driver/qbman_portal.c
+@@ -0,0 +1,1407 @@
++/* Copyright (C) 2014 Freescale Semiconductor, Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of Freescale Semiconductor nor the
++ * names of its contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++#include "qbman_portal.h"
++
++/* QBMan portal management command codes */
++#define QBMAN_MC_ACQUIRE 0x30
++#define QBMAN_WQCHAN_CONFIGURE 0x46
++
++/* CINH register offsets */
++#define QBMAN_CINH_SWP_EQCR_PI 0x800
++#define QBMAN_CINH_SWP_EQCR_CI 0x840
++#define QBMAN_CINH_SWP_EQAR 0x8c0
++#define QBMAN_CINH_SWP_DQPI 0xa00
++#define QBMAN_CINH_SWP_DCAP 0xac0
++#define QBMAN_CINH_SWP_SDQCR 0xb00
++#define QBMAN_CINH_SWP_RAR 0xcc0
++#define QBMAN_CINH_SWP_ISR 0xe00
++#define QBMAN_CINH_SWP_IER 0xe40
++#define QBMAN_CINH_SWP_ISDR 0xe80
++#define QBMAN_CINH_SWP_IIR 0xec0
++
++/* CENA register offsets */
++#define QBMAN_CENA_SWP_EQCR(n) (0x000 + ((uint32_t)(n) << 6))
++#define QBMAN_CENA_SWP_DQRR(n) (0x200 + ((uint32_t)(n) << 6))
++#define QBMAN_CENA_SWP_RCR(n) (0x400 + ((uint32_t)(n) << 6))
++#define QBMAN_CENA_SWP_CR 0x600
++#define QBMAN_CENA_SWP_RR(vb) (0x700 + ((uint32_t)(vb) >> 1))
++#define QBMAN_CENA_SWP_VDQCR 0x780
++#define QBMAN_CENA_SWP_EQCR_CI 0x840
++
++/* Reverse mapping of QBMAN_CENA_SWP_DQRR() */
++#define QBMAN_IDX_FROM_DQRR(p) (((unsigned long)p & 0x1ff) >> 6)
++
++/* QBMan FQ management command codes */
++#define QBMAN_FQ_SCHEDULE 0x48
++#define QBMAN_FQ_FORCE 0x49
++#define QBMAN_FQ_XON 0x4d
++#define QBMAN_FQ_XOFF 0x4e
++
++/*******************************/
++/* Pre-defined attribute codes */
++/*******************************/
++
++struct qb_attr_code code_generic_verb = QB_CODE(0, 0, 7);
++struct qb_attr_code code_generic_rslt = QB_CODE(0, 8, 8);
++
++/*************************/
++/* SDQCR attribute codes */
++/*************************/
++
++/* we put these here because at least some of them are required by
++ * qbman_swp_init() */
++struct qb_attr_code code_sdqcr_dct = QB_CODE(0, 24, 2);
++struct qb_attr_code code_sdqcr_fc = QB_CODE(0, 29, 1);
++struct qb_attr_code code_sdqcr_tok = QB_CODE(0, 16, 8);
++#define CODE_SDQCR_DQSRC(n) QB_CODE(0, n, 1)
++enum qbman_sdqcr_dct {
++ qbman_sdqcr_dct_null = 0,
++ qbman_sdqcr_dct_prio_ics,
++ qbman_sdqcr_dct_active_ics,
++ qbman_sdqcr_dct_active
++};
++enum qbman_sdqcr_fc {
++ qbman_sdqcr_fc_one = 0,
++ qbman_sdqcr_fc_up_to_3 = 1
++};
++struct qb_attr_code code_sdqcr_dqsrc = QB_CODE(0, 0, 16);
++
++/*********************************/
++/* Portal constructor/destructor */
++/*********************************/
++
++/* Software portals should always be in the power-on state when we initialise,
++ * due to the CCSR-based portal reset functionality that MC has.
++ *
++ * Note that QMan versions prior to 4.1 do not correctly reset DQRR
++ * valid-bits, so we need to support a workaround where we don't trust
++ * valid-bits when detecting new entries until any stale ring entries have been
++ * overwritten at least once. The idea is that we read PI for the first few
++ * entries, then switch to valid-bit after that. The trick is to clear the
++ * bug-work-around boolean once the PI wraps around the ring for the first time.
++ *
++ * Note: this still carries a slight additional cost once the decrementer hits
++ * zero.
++ */
++struct qbman_swp *qbman_swp_init(const struct qbman_swp_desc *d)
++{
++ int ret;
++ uint32_t eqcr_pi;
++ struct qbman_swp *p = kmalloc(sizeof(*p), GFP_KERNEL);
++ if (!p)
++ return NULL;
++ p->desc = d;
++#ifdef QBMAN_CHECKING
++ p->mc.check = swp_mc_can_start;
++#endif
++ p->mc.valid_bit = QB_VALID_BIT;
++ p->sdq = 0;
++ qb_attr_code_encode(&code_sdqcr_dct, &p->sdq, qbman_sdqcr_dct_prio_ics);
++ qb_attr_code_encode(&code_sdqcr_fc, &p->sdq, qbman_sdqcr_fc_up_to_3);
++ qb_attr_code_encode(&code_sdqcr_tok, &p->sdq, 0xbb);
++ atomic_set(&p->vdq.busy, 1);
++ p->vdq.valid_bit = QB_VALID_BIT;
++ p->dqrr.next_idx = 0;
++ p->dqrr.valid_bit = QB_VALID_BIT;
++ qman_version = p->desc->qman_version;
++ if ((qman_version & 0xFFFF0000) < QMAN_REV_4100) {
++ p->dqrr.dqrr_size = 4;
++ p->dqrr.reset_bug = 1;
++ } else {
++ p->dqrr.dqrr_size = 8;
++ p->dqrr.reset_bug = 0;
++ }
++
++ ret = qbman_swp_sys_init(&p->sys, d, p->dqrr.dqrr_size);
++ if (ret) {
++ kfree(p);
++ pr_err("qbman_swp_sys_init() failed %d\n", ret);
++ return NULL;
++ }
++ /* SDQCR needs to be initialized to 0 when no channels are
++ being dequeued from or else the QMan HW will indicate an
++ error. The values that were calculated above will be
++ applied when dequeues from a specific channel are enabled */
++ qbman_cinh_write(&p->sys, QBMAN_CINH_SWP_SDQCR, 0);
++ eqcr_pi = qbman_cinh_read(&p->sys, QBMAN_CINH_SWP_EQCR_PI);
++ p->eqcr.pi = eqcr_pi & 0xF;
++ p->eqcr.pi_vb = eqcr_pi & QB_VALID_BIT;
++ p->eqcr.ci = qbman_cinh_read(&p->sys, QBMAN_CINH_SWP_EQCR_CI) & 0xF;
++ p->eqcr.available = QBMAN_EQCR_SIZE - qm_cyc_diff(QBMAN_EQCR_SIZE,
++ p->eqcr.ci, p->eqcr.pi);
++
++ return p;
++}
++
++void qbman_swp_finish(struct qbman_swp *p)
++{
++#ifdef QBMAN_CHECKING
++ BUG_ON(p->mc.check != swp_mc_can_start);
++#endif
++ qbman_swp_sys_finish(&p->sys);
++ kfree(p);
++}
++
++const struct qbman_swp_desc *qbman_swp_get_desc(struct qbman_swp *p)
++{
++ return p->desc;
++}
++
++/**************/
++/* Interrupts */
++/**************/
++
++uint32_t qbman_swp_interrupt_get_vanish(struct qbman_swp *p)
++{
++ return qbman_cinh_read(&p->sys, QBMAN_CINH_SWP_ISDR);
++}
++
++void qbman_swp_interrupt_set_vanish(struct qbman_swp *p, uint32_t mask)
++{
++ qbman_cinh_write(&p->sys, QBMAN_CINH_SWP_ISDR, mask);
++}
++
++uint32_t qbman_swp_interrupt_read_status(struct qbman_swp *p)
++{
++ return qbman_cinh_read(&p->sys, QBMAN_CINH_SWP_ISR);
++}
++
++void qbman_swp_interrupt_clear_status(struct qbman_swp *p, uint32_t mask)
++{
++ qbman_cinh_write(&p->sys, QBMAN_CINH_SWP_ISR, mask);
++}
++
++uint32_t qbman_swp_interrupt_get_trigger(struct qbman_swp *p)
++{
++ return qbman_cinh_read(&p->sys, QBMAN_CINH_SWP_IER);
++}
++
++void qbman_swp_interrupt_set_trigger(struct qbman_swp *p, uint32_t mask)
++{
++ qbman_cinh_write(&p->sys, QBMAN_CINH_SWP_IER, mask);
++}
++
++int qbman_swp_interrupt_get_inhibit(struct qbman_swp *p)
++{
++ return qbman_cinh_read(&p->sys, QBMAN_CINH_SWP_IIR);
++}
++
++void qbman_swp_interrupt_set_inhibit(struct qbman_swp *p, int inhibit)
++{
++ qbman_cinh_write(&p->sys, QBMAN_CINH_SWP_IIR, inhibit ? 0xffffffff : 0);
++}
++
++/***********************/
++/* Management commands */
++/***********************/
++
++/*
++ * Internal code common to all types of management commands.
++ */
++
++void *qbman_swp_mc_start(struct qbman_swp *p)
++{
++ void *ret;
++#ifdef QBMAN_CHECKING
++ BUG_ON(p->mc.check != swp_mc_can_start);
++#endif
++ ret = qbman_cena_write_start(&p->sys, QBMAN_CENA_SWP_CR);
++#ifdef QBMAN_CHECKING
++ if (!ret)
++ p->mc.check = swp_mc_can_submit;
++#endif
++ return ret;
++}
++
++void qbman_swp_mc_submit(struct qbman_swp *p, void *cmd, uint32_t cmd_verb)
++{
++ uint32_t *v = cmd;
++#ifdef QBMAN_CHECKING
++	BUG_ON(p->mc.check != swp_mc_can_submit);
++#endif
++ /* TBD: "|=" is going to hurt performance. Need to move as many fields
++ * out of word zero, and for those that remain, the "OR" needs to occur
++ * at the caller side. This debug check helps to catch cases where the
++ * caller wants to OR but has forgotten to do so. */
++ BUG_ON((*v & cmd_verb) != *v);
++ *v = cmd_verb | p->mc.valid_bit;
++ qbman_cena_write_complete(&p->sys, QBMAN_CENA_SWP_CR, cmd);
++#ifdef QBMAN_CHECKING
++ p->mc.check = swp_mc_can_poll;
++#endif
++}
++
++void *qbman_swp_mc_result(struct qbman_swp *p)
++{
++ uint32_t *ret, verb;
++#ifdef QBMAN_CHECKING
++ BUG_ON(p->mc.check != swp_mc_can_poll);
++#endif
++ qbman_cena_invalidate_prefetch(&p->sys,
++ QBMAN_CENA_SWP_RR(p->mc.valid_bit));
++ ret = qbman_cena_read(&p->sys, QBMAN_CENA_SWP_RR(p->mc.valid_bit));
++ /* Remove the valid-bit - command completed iff the rest is non-zero */
++ verb = ret[0] & ~QB_VALID_BIT;
++ if (!verb)
++ return NULL;
++#ifdef QBMAN_CHECKING
++ p->mc.check = swp_mc_can_start;
++#endif
++ p->mc.valid_bit ^= QB_VALID_BIT;
++ return ret;
++}
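++
++/* A minimal sketch of how these three primitives are meant to be combined,
++ * with 'verb' standing for one of the management command codes defined
++ * near the top of this file:
++ *
++ *	uint32_t *cmd, *rsp;
++ *
++ *	cmd = qbman_swp_mc_start(p);
++ *	if (!cmd)
++ *		return -EBUSY;
++ *	... encode the command payload into the remaining words of cmd ...
++ *	qbman_swp_mc_submit(p, cmd, verb);
++ *	do {
++ *		rsp = qbman_swp_mc_result(p);
++ *	} while (!rsp);
++ *
++ * The qbman_swp_mc_complete() helper used by qbman_debug.c wraps the
++ * submit-and-poll half of this sequence.
++ */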
++
++/***********/
++/* Enqueue */
++/***********/
++
++/* These should be const, eventually */
++static struct qb_attr_code code_eq_cmd = QB_CODE(0, 0, 2);
++static struct qb_attr_code code_eq_eqdi = QB_CODE(0, 3, 1);
++static struct qb_attr_code code_eq_dca_en = QB_CODE(0, 15, 1);
++static struct qb_attr_code code_eq_dca_pk = QB_CODE(0, 14, 1);
++static struct qb_attr_code code_eq_dca_idx = QB_CODE(0, 8, 2);
++static struct qb_attr_code code_eq_orp_en = QB_CODE(0, 2, 1);
++static struct qb_attr_code code_eq_orp_is_nesn = QB_CODE(0, 31, 1);
++static struct qb_attr_code code_eq_orp_nlis = QB_CODE(0, 30, 1);
++static struct qb_attr_code code_eq_orp_seqnum = QB_CODE(0, 16, 14);
++static struct qb_attr_code code_eq_opr_id = QB_CODE(1, 0, 16);
++static struct qb_attr_code code_eq_tgt_id = QB_CODE(2, 0, 24);
++/* static struct qb_attr_code code_eq_tag = QB_CODE(3, 0, 32); */
++static struct qb_attr_code code_eq_qd_en = QB_CODE(0, 4, 1);
++static struct qb_attr_code code_eq_qd_bin = QB_CODE(4, 0, 16);
++static struct qb_attr_code code_eq_qd_pri = QB_CODE(4, 16, 4);
++static struct qb_attr_code code_eq_rsp_stash = QB_CODE(5, 16, 1);
++static struct qb_attr_code code_eq_rsp_id = QB_CODE(5, 24, 8);
++static struct qb_attr_code code_eq_rsp_lo = QB_CODE(6, 0, 32);
++
++enum qbman_eq_cmd_e {
++ /* No enqueue, primarily for plugging ORP gaps for dropped frames */
++ qbman_eq_cmd_empty,
++ /* DMA an enqueue response once complete */
++ qbman_eq_cmd_respond,
++ /* DMA an enqueue response only if the enqueue fails */
++ qbman_eq_cmd_respond_reject
++};
++
++void qbman_eq_desc_clear(struct qbman_eq_desc *d)
++{
++ memset(d, 0, sizeof(*d));
++}
++
++void qbman_eq_desc_set_no_orp(struct qbman_eq_desc *d, int respond_success)
++{
++ uint32_t *cl = qb_cl(d);
++ qb_attr_code_encode(&code_eq_orp_en, cl, 0);
++ qb_attr_code_encode(&code_eq_cmd, cl,
++ respond_success ? qbman_eq_cmd_respond :
++ qbman_eq_cmd_respond_reject);
++}
++
++void qbman_eq_desc_set_orp(struct qbman_eq_desc *d, int respond_success,
++ uint32_t opr_id, uint32_t seqnum, int incomplete)
++{
++ uint32_t *cl = qb_cl(d);
++ qb_attr_code_encode(&code_eq_orp_en, cl, 1);
++ qb_attr_code_encode(&code_eq_cmd, cl,
++ respond_success ? qbman_eq_cmd_respond :
++ qbman_eq_cmd_respond_reject);
++ qb_attr_code_encode(&code_eq_opr_id, cl, opr_id);
++ qb_attr_code_encode(&code_eq_orp_seqnum, cl, seqnum);
++ qb_attr_code_encode(&code_eq_orp_nlis, cl, !!incomplete);
++}
++
++void qbman_eq_desc_set_orp_hole(struct qbman_eq_desc *d, uint32_t opr_id,
++ uint32_t seqnum)
++{
++ uint32_t *cl = qb_cl(d);
++ qb_attr_code_encode(&code_eq_orp_en, cl, 1);
++ qb_attr_code_encode(&code_eq_cmd, cl, qbman_eq_cmd_empty);
++ qb_attr_code_encode(&code_eq_opr_id, cl, opr_id);
++ qb_attr_code_encode(&code_eq_orp_seqnum, cl, seqnum);
++ qb_attr_code_encode(&code_eq_orp_nlis, cl, 0);
++ qb_attr_code_encode(&code_eq_orp_is_nesn, cl, 0);
++}
++
++void qbman_eq_desc_set_orp_nesn(struct qbman_eq_desc *d, uint32_t opr_id,
++ uint32_t seqnum)
++{
++ uint32_t *cl = qb_cl(d);
++ qb_attr_code_encode(&code_eq_orp_en, cl, 1);
++ qb_attr_code_encode(&code_eq_cmd, cl, qbman_eq_cmd_empty);
++ qb_attr_code_encode(&code_eq_opr_id, cl, opr_id);
++ qb_attr_code_encode(&code_eq_orp_seqnum, cl, seqnum);
++ qb_attr_code_encode(&code_eq_orp_nlis, cl, 0);
++ qb_attr_code_encode(&code_eq_orp_is_nesn, cl, 1);
++}
++
++void qbman_eq_desc_set_response(struct qbman_eq_desc *d,
++ dma_addr_t storage_phys,
++ int stash)
++{
++ uint32_t *cl = qb_cl(d);
++ qb_attr_code_encode_64(&code_eq_rsp_lo, (uint64_t *)cl, storage_phys);
++ qb_attr_code_encode(&code_eq_rsp_stash, cl, !!stash);
++}
++
++void qbman_eq_desc_set_token(struct qbman_eq_desc *d, uint8_t token)
++{
++ uint32_t *cl = qb_cl(d);
++ qb_attr_code_encode(&code_eq_rsp_id, cl, (uint32_t)token);
++}
++
++void qbman_eq_desc_set_fq(struct qbman_eq_desc *d, uint32_t fqid)
++{
++ uint32_t *cl = qb_cl(d);
++ qb_attr_code_encode(&code_eq_qd_en, cl, 0);
++ qb_attr_code_encode(&code_eq_tgt_id, cl, fqid);
++}
++
++void qbman_eq_desc_set_qd(struct qbman_eq_desc *d, uint32_t qdid,
++ uint32_t qd_bin, uint32_t qd_prio)
++{
++ uint32_t *cl = qb_cl(d);
++ qb_attr_code_encode(&code_eq_qd_en, cl, 1);
++ qb_attr_code_encode(&code_eq_tgt_id, cl, qdid);
++ qb_attr_code_encode(&code_eq_qd_bin, cl, qd_bin);
++ qb_attr_code_encode(&code_eq_qd_pri, cl, qd_prio);
++}
++
++void qbman_eq_desc_set_eqdi(struct qbman_eq_desc *d, int enable)
++{
++ uint32_t *cl = qb_cl(d);
++ qb_attr_code_encode(&code_eq_eqdi, cl, !!enable);
++}
++
++void qbman_eq_desc_set_dca(struct qbman_eq_desc *d, int enable,
++ uint32_t dqrr_idx, int park)
++{
++ uint32_t *cl = qb_cl(d);
++ qb_attr_code_encode(&code_eq_dca_en, cl, !!enable);
++ if (enable) {
++ qb_attr_code_encode(&code_eq_dca_pk, cl, !!park);
++ qb_attr_code_encode(&code_eq_dca_idx, cl, dqrr_idx);
++ }
++}
++
++#define EQAR_IDX(eqar) ((eqar) & 0x7)
++#define EQAR_VB(eqar) ((eqar) & 0x80)
++#define EQAR_SUCCESS(eqar) ((eqar) & 0x100)
++static int qbman_swp_enqueue_array_mode(struct qbman_swp *s,
++ const struct qbman_eq_desc *d,
++ const struct qbman_fd *fd)
++{
++ uint32_t *p;
++ const uint32_t *cl = qb_cl(d);
++ uint32_t eqar = qbman_cinh_read(&s->sys, QBMAN_CINH_SWP_EQAR);
++ pr_debug("EQAR=%08x\n", eqar);
++ if (!EQAR_SUCCESS(eqar))
++ return -EBUSY;
++ p = qbman_cena_write_start_wo_shadow(&s->sys,
++ QBMAN_CENA_SWP_EQCR(EQAR_IDX(eqar)));
++ word_copy(&p[1], &cl[1], 7);
++ word_copy(&p[8], fd, sizeof(*fd) >> 2);
++ /* Set the verb byte, have to substitute in the valid-bit */
++ lwsync();
++ p[0] = cl[0] | EQAR_VB(eqar);
++ qbman_cena_write_complete_wo_shadow(&s->sys,
++ QBMAN_CENA_SWP_EQCR(EQAR_IDX(eqar)));
++ return 0;
++}
++
++static int qbman_swp_enqueue_ring_mode(struct qbman_swp *s,
++ const struct qbman_eq_desc *d,
++ const struct qbman_fd *fd)
++{
++ uint32_t *p;
++ const uint32_t *cl = qb_cl(d);
++ uint32_t eqcr_ci;
++ uint8_t diff;
++
++ if (!s->eqcr.available) {
++ eqcr_ci = s->eqcr.ci;
++ s->eqcr.ci = qbman_cena_read_reg(&s->sys,
++ QBMAN_CENA_SWP_EQCR_CI) & 0xF;
++ diff = qm_cyc_diff(QBMAN_EQCR_SIZE,
++ eqcr_ci, s->eqcr.ci);
++ s->eqcr.available += diff;
++ if (!diff)
++ return -EBUSY;
++ }
++
++ p = qbman_cena_write_start_wo_shadow(&s->sys,
++ QBMAN_CENA_SWP_EQCR(s->eqcr.pi & 7));
++ word_copy(&p[1], &cl[1], 7);
++ word_copy(&p[8], fd, sizeof(*fd) >> 2);
++ lwsync();
++ /* Set the verb byte, have to substitute in the valid-bit */
++ p[0] = cl[0] | s->eqcr.pi_vb;
++ qbman_cena_write_complete_wo_shadow(&s->sys,
++ QBMAN_CENA_SWP_EQCR(s->eqcr.pi & 7));
++ s->eqcr.pi++;
++ s->eqcr.pi &= 0xF;
++ s->eqcr.available--;
++ if (!(s->eqcr.pi & 7))
++ s->eqcr.pi_vb ^= QB_VALID_BIT;
++ return 0;
++}
++
++int qbman_swp_fill_ring(struct qbman_swp *s,
++ const struct qbman_eq_desc *d,
++ const struct qbman_fd *fd,
++ __attribute__((unused)) uint8_t burst_index)
++{
++ uint32_t *p;
++ const uint32_t *cl = qb_cl(d);
++ uint32_t eqcr_ci;
++ uint8_t diff;
++
++ if (!s->eqcr.available) {
++ eqcr_ci = s->eqcr.ci;
++ s->eqcr.ci = qbman_cena_read_reg(&s->sys,
++ QBMAN_CENA_SWP_EQCR_CI) & 0xF;
++ diff = qm_cyc_diff(QBMAN_EQCR_SIZE,
++ eqcr_ci, s->eqcr.ci);
++ s->eqcr.available += diff;
++		if (!diff)
++			return -EBUSY;
++ }
++ p = qbman_cena_write_start_wo_shadow(&s->sys,
++ QBMAN_CENA_SWP_EQCR((s->eqcr.pi/* +burst_index */) & 7));
++	/* word_copy(&p[1], &cl[1], 7); */
++	memcpy(&p[1], &cl[1], 7 * sizeof(uint32_t));
++	/* word_copy(&p[8], fd, sizeof(*fd) >> 2); */
++	memcpy(&p[8], fd, sizeof(struct qbman_fd));
++
++	/* lwsync(); */
++
++ p[0] = cl[0] | s->eqcr.pi_vb;
++
++ s->eqcr.pi++;
++ s->eqcr.pi &= 0xF;
++ s->eqcr.available--;
++ if (!(s->eqcr.pi & 7))
++ s->eqcr.pi_vb ^= QB_VALID_BIT;
++
++ return 0;
++}
++
++int qbman_swp_flush_ring(struct qbman_swp *s)
++{
++ void *ptr = s->sys.addr_cena;
++ dcbf((uint64_t)ptr);
++ dcbf((uint64_t)ptr + 0x40);
++ dcbf((uint64_t)ptr + 0x80);
++ dcbf((uint64_t)ptr + 0xc0);
++ dcbf((uint64_t)ptr + 0x100);
++ dcbf((uint64_t)ptr + 0x140);
++ dcbf((uint64_t)ptr + 0x180);
++ dcbf((uint64_t)ptr + 0x1c0);
++
++ return 0;
++}
++
++void qbman_sync(void)
++{
++ lwsync();
++}
++
++int qbman_swp_enqueue(struct qbman_swp *s, const struct qbman_eq_desc *d,
++ const struct qbman_fd *fd)
++{
++ if (s->sys.eqcr_mode == qman_eqcr_vb_array)
++ return qbman_swp_enqueue_array_mode(s, d, fd);
++ else /* Use ring mode by default */
++ return qbman_swp_enqueue_ring_mode(s, d, fd);
++}
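++
++/* A minimal enqueue sketch, assuming 's' is an initialised portal, 'fqid'
++ * a valid frame queue id and 'fd' an already populated frame descriptor:
++ *
++ *	struct qbman_eq_desc d;
++ *
++ *	qbman_eq_desc_clear(&d);
++ *	qbman_eq_desc_set_no_orp(&d, 0);
++ *	qbman_eq_desc_set_fq(&d, fqid);
++ *	while (qbman_swp_enqueue(s, &d, fd) == -EBUSY)
++ *		;
++ *
++ * A -EBUSY return means the EQCR ring (or array slot) is currently full;
++ * callers may retry as above or back off.
++ */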
++
++/*************************/
++/* Static (push) dequeue */
++/*************************/
++
++void qbman_swp_push_get(struct qbman_swp *s, uint8_t channel_idx, int *enabled)
++{
++ struct qb_attr_code code = CODE_SDQCR_DQSRC(channel_idx);
++
++ BUG_ON(channel_idx > 15);
++ *enabled = (int)qb_attr_code_decode(&code, &s->sdq);
++}
++
++void qbman_swp_push_set(struct qbman_swp *s, uint8_t channel_idx, int enable)
++{
++ uint16_t dqsrc;
++ struct qb_attr_code code = CODE_SDQCR_DQSRC(channel_idx);
++ BUG_ON(channel_idx > 15);
++ qb_attr_code_encode(&code, &s->sdq, !!enable);
++ /* Read back the complete src map. If no channels are enabled
++ * the SDQCR must be 0 or else QMan will assert errors */
++ dqsrc = (uint16_t)qb_attr_code_decode(&code_sdqcr_dqsrc, &s->sdq);
++ if (dqsrc != 0)
++ qbman_cinh_write(&s->sys, QBMAN_CINH_SWP_SDQCR, s->sdq);
++ else
++ qbman_cinh_write(&s->sys, QBMAN_CINH_SWP_SDQCR, 0);
++}
++
++/***************************/
++/* Volatile (pull) dequeue */
++/***************************/
++
++/* These should be const, eventually */
++static struct qb_attr_code code_pull_dct = QB_CODE(0, 0, 2);
++static struct qb_attr_code code_pull_dt = QB_CODE(0, 2, 2);
++static struct qb_attr_code code_pull_rls = QB_CODE(0, 4, 1);
++static struct qb_attr_code code_pull_stash = QB_CODE(0, 5, 1);
++static struct qb_attr_code code_pull_numframes = QB_CODE(0, 8, 4);
++static struct qb_attr_code code_pull_token = QB_CODE(0, 16, 8);
++static struct qb_attr_code code_pull_dqsource = QB_CODE(1, 0, 24);
++static struct qb_attr_code code_pull_rsp_lo = QB_CODE(2, 0, 32);
++
++enum qb_pull_dt_e {
++ qb_pull_dt_channel,
++ qb_pull_dt_workqueue,
++ qb_pull_dt_framequeue
++};
++
++void qbman_pull_desc_clear(struct qbman_pull_desc *d)
++{
++ memset(d, 0, sizeof(*d));
++}
++
++void qbman_pull_desc_set_storage(struct qbman_pull_desc *d,
++ struct qbman_result *storage,
++ dma_addr_t storage_phys,
++ int stash)
++{
++ uint32_t *cl = qb_cl(d);
++ /* Squiggle the pointer 'storage' into the extra 2 words of the
++ * descriptor (which aren't copied to the hw command) */
++ *(void **)&cl[4] = storage;
++ if (!storage) {
++ qb_attr_code_encode(&code_pull_rls, cl, 0);
++ return;
++ }
++ qb_attr_code_encode(&code_pull_rls, cl, 1);
++ qb_attr_code_encode(&code_pull_stash, cl, !!stash);
++ qb_attr_code_encode_64(&code_pull_rsp_lo, (uint64_t *)cl, storage_phys);
++}
++
++void qbman_pull_desc_set_numframes(struct qbman_pull_desc *d, uint8_t numframes)
++{
++ uint32_t *cl = qb_cl(d);
++ BUG_ON(!numframes || (numframes > 16));
++ qb_attr_code_encode(&code_pull_numframes, cl,
++ (uint32_t)(numframes - 1));
++}
++
++void qbman_pull_desc_set_token(struct qbman_pull_desc *d, uint8_t token)
++{
++ uint32_t *cl = qb_cl(d);
++ qb_attr_code_encode(&code_pull_token, cl, token);
++}
++
++void qbman_pull_desc_set_fq(struct qbman_pull_desc *d, uint32_t fqid)
++{
++ uint32_t *cl = qb_cl(d);
++ qb_attr_code_encode(&code_pull_dct, cl, 1);
++ qb_attr_code_encode(&code_pull_dt, cl, qb_pull_dt_framequeue);
++ qb_attr_code_encode(&code_pull_dqsource, cl, fqid);
++}
++
++void qbman_pull_desc_set_wq(struct qbman_pull_desc *d, uint32_t wqid,
++ enum qbman_pull_type_e dct)
++{
++ uint32_t *cl = qb_cl(d);
++ qb_attr_code_encode(&code_pull_dct, cl, dct);
++ qb_attr_code_encode(&code_pull_dt, cl, qb_pull_dt_workqueue);
++ qb_attr_code_encode(&code_pull_dqsource, cl, wqid);
++}
++
++void qbman_pull_desc_set_channel(struct qbman_pull_desc *d, uint32_t chid,
++ enum qbman_pull_type_e dct)
++{
++ uint32_t *cl = qb_cl(d);
++ qb_attr_code_encode(&code_pull_dct, cl, dct);
++ qb_attr_code_encode(&code_pull_dt, cl, qb_pull_dt_channel);
++ qb_attr_code_encode(&code_pull_dqsource, cl, chid);
++}
++
++int qbman_swp_pull(struct qbman_swp *s, struct qbman_pull_desc *d)
++{
++ uint32_t *p;
++ uint32_t *cl = qb_cl(d);
++ if (!atomic_dec_and_test(&s->vdq.busy)) {
++ atomic_inc(&s->vdq.busy);
++ return -EBUSY;
++ }
++ s->vdq.storage = *(void **)&cl[4];
++ qb_attr_code_encode(&code_pull_token, cl, 1);
++ p = qbman_cena_write_start_wo_shadow(&s->sys, QBMAN_CENA_SWP_VDQCR);
++ word_copy(&p[1], &cl[1], 3);
++ /* Set the verb byte, have to substitute in the valid-bit */
++ lwsync();
++ p[0] = cl[0] | s->vdq.valid_bit;
++ s->vdq.valid_bit ^= QB_VALID_BIT;
++ qbman_cena_write_complete_wo_shadow(&s->sys, QBMAN_CENA_SWP_VDQCR);
++ return 0;
++}
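++
++/* Illustrative example (not part of the driver): a minimal volatile dequeue
++ * of up to 16 frames from 'fqid' into caller-provided storage;
++ * qbman_swp_pull() returns -EBUSY if the previous volatile dequeue is still
++ * outstanding. 'swp', 'storage' and its DMA address 'storage_phys' are
++ * assumed to come from an initialised portal and a DMA-able allocation made
++ * elsewhere:
++ *
++ * struct qbman_pull_desc pd;
++ *
++ * qbman_pull_desc_clear(&pd);
++ * qbman_pull_desc_set_storage(&pd, storage, storage_phys, 1);
++ * qbman_pull_desc_set_numframes(&pd, 16);
++ * qbman_pull_desc_set_fq(&pd, fqid);
++ * if (qbman_swp_pull(swp, &pd) == -EBUSY)
++ * return;
++ * while (!qbman_result_has_new_result(swp, storage))
++ * ;
++ */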
++
++/****************/
++/* Polling DQRR */
++/****************/
++
++static struct qb_attr_code code_dqrr_verb = QB_CODE(0, 0, 8);
++static struct qb_attr_code code_dqrr_response = QB_CODE(0, 0, 7);
++static struct qb_attr_code code_dqrr_stat = QB_CODE(0, 8, 8);
++static struct qb_attr_code code_dqrr_seqnum = QB_CODE(0, 16, 14);
++static struct qb_attr_code code_dqrr_odpid = QB_CODE(1, 0, 16);
++/* static struct qb_attr_code code_dqrr_tok = QB_CODE(1, 24, 8); */
++static struct qb_attr_code code_dqrr_fqid = QB_CODE(2, 0, 24);
++static struct qb_attr_code code_dqrr_byte_count = QB_CODE(4, 0, 32);
++static struct qb_attr_code code_dqrr_frame_count = QB_CODE(5, 0, 24);
++static struct qb_attr_code code_dqrr_ctx_lo = QB_CODE(6, 0, 32);
++
++#define QBMAN_RESULT_DQ 0x60
++#define QBMAN_RESULT_FQRN 0x21
++#define QBMAN_RESULT_FQRNI 0x22
++#define QBMAN_RESULT_FQPN 0x24
++#define QBMAN_RESULT_FQDAN 0x25
++#define QBMAN_RESULT_CDAN 0x26
++#define QBMAN_RESULT_CSCN_MEM 0x27
++#define QBMAN_RESULT_CGCU 0x28
++#define QBMAN_RESULT_BPSCN 0x29
++#define QBMAN_RESULT_CSCN_WQ 0x2a
++
++static struct qb_attr_code code_dqpi_pi = QB_CODE(0, 0, 4);
++
++/* NULL return if there are no unconsumed DQRR entries. Returns a DQRR entry
++ * only once, so repeated calls can return a sequence of DQRR entries, without
++ * requiring they be consumed immediately or in any particular order. */
++const struct qbman_result *qbman_swp_dqrr_next(struct qbman_swp *s)
++{
++ uint32_t verb;
++ uint32_t response_verb;
++ uint32_t flags;
++ const struct qbman_result *dq;
++ const uint32_t *p;
++
++ /* Before using valid-bit to detect if something is there, we have to
++ * handle the case of the DQRR reset bug... */
++ if (unlikely(s->dqrr.reset_bug)) {
++ /* We pick up new entries by cache-inhibited producer index,
++ * which means that a non-coherent mapping would require us to
++ * invalidate and read *only* once that PI has indicated that
++ * there's an entry here. The first trip around the DQRR ring
++ * will be much less efficient than all subsequent trips around
++ * it...
++ */
++ uint32_t dqpi = qbman_cinh_read(&s->sys, QBMAN_CINH_SWP_DQPI);
++ uint32_t pi = qb_attr_code_decode(&code_dqpi_pi, &dqpi);
++ /* there are new entries iff pi != next_idx */
++ if (pi == s->dqrr.next_idx)
++ return NULL;
++ /* if next_idx is/was the last ring index, and 'pi' is
++ * different, we can disable the workaround as all the ring
++ * entries have now been DMA'd to so valid-bit checking is
++ * repaired. Note: this logic needs to be based on next_idx
++ * (which increments one at a time), rather than on pi (which
++ * can burst and wrap-around between our snapshots of it).
++ */
++ BUG_ON((s->dqrr.dqrr_size - 1) < 0);
++ if (s->dqrr.next_idx == (s->dqrr.dqrr_size - 1u)) {
++ pr_debug("DEBUG: next_idx=%d, pi=%d, clear reset bug\n",
++ s->dqrr.next_idx, pi);
++ s->dqrr.reset_bug = 0;
++ }
++ qbman_cena_invalidate_prefetch(&s->sys,
++ QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx));
++ }
++ dq = qbman_cena_read_wo_shadow(&s->sys,
++ QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx));
++ p = qb_cl(dq);
++ verb = qb_attr_code_decode(&code_dqrr_verb, p);
++ /* If the valid-bit isn't of the expected polarity, nothing there. Note,
++ * in the DQRR reset bug workaround, we shouldn't need to skip this
++ * check, because we've already determined that a new entry is available
++ * and we've invalidated the cacheline before reading it, so the
++ * valid-bit behaviour is repaired and should tell us what we already
++ * knew from reading PI.
++ */
++ if ((verb & QB_VALID_BIT) != s->dqrr.valid_bit)
++ return NULL;
++
++ /* There's something there. Move "next_idx" attention to the next ring
++ * entry (and prefetch it) before returning what we found. */
++ s->dqrr.next_idx++;
++ if (s->dqrr.next_idx == QBMAN_DQRR_SIZE) {
++ s->dqrr.next_idx = 0;
++ s->dqrr.valid_bit ^= QB_VALID_BIT;
++ }
++ /* If this is the final response to a volatile dequeue command
++ indicate that the vdq is no longer busy */
++ flags = qbman_result_DQ_flags(dq);
++ response_verb = qb_attr_code_decode(&code_dqrr_response, &verb);
++ if ((response_verb == QBMAN_RESULT_DQ) &&
++ (flags & QBMAN_DQ_STAT_VOLATILE) &&
++ (flags & QBMAN_DQ_STAT_EXPIRED))
++ atomic_inc(&s->vdq.busy);
++
++ return dq;
++}
++
++/* Consume DQRR entries previously returned from qbman_swp_dqrr_next(). */
++void qbman_swp_dqrr_consume(struct qbman_swp *s,
++ const struct qbman_result *dq)
++{
++ qbman_cinh_write(&s->sys, QBMAN_CINH_SWP_DCAP, QBMAN_IDX_FROM_DQRR(dq));
++}
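++
++/* Illustrative example (not part of the driver): a push-dequeue poll loop.
++ * Each entry returned by qbman_swp_dqrr_next() is handled and then consumed;
++ * 'swp' is assumed to be an initialised portal with at least one channel
++ * enabled via qbman_swp_push_set(), and process_fd() stands in for the
++ * caller's frame handler:
++ *
++ * const struct qbman_result *dq;
++ *
++ * while ((dq = qbman_swp_dqrr_next(swp)) != NULL) {
++ * if (qbman_result_is_DQ(dq))
++ * process_fd(qbman_result_DQ_fd(dq));
++ * qbman_swp_dqrr_consume(swp, dq);
++ * }
++ */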
++
++/*********************************/
++/* Polling user-provided storage */
++/*********************************/
++
++int qbman_result_has_new_result(__attribute__((unused)) struct qbman_swp *s,
++ const struct qbman_result *dq)
++{
++ /* To avoid converting the little-endian DQ entry to host-endian prior
++ * to us knowing whether there is a valid entry or not (and run the
++ * risk of corrupting the incoming hardware LE write), we detect in
++ * hardware endianness rather than host. This means we need a different
++ * "code" depending on whether we are BE or LE in software, which is
++ * where DQRR_TOK_OFFSET comes in... */
++ static struct qb_attr_code code_dqrr_tok_detect =
++ QB_CODE(0, DQRR_TOK_OFFSET, 8);
++ /* The user trying to poll for a result treats "dq" as const. It is
++ * however the same address that was provided to us non-const in the
++ * first place, for directing hardware DMA to. So we can cast away the
++ * const because it is mutable from our perspective. */
++ uint32_t *p = (uint32_t *)(unsigned long)qb_cl(dq);
++ uint32_t token;
++
++ token = qb_attr_code_decode(&code_dqrr_tok_detect, &p[1]);
++ if (token != 1)
++ return 0;
++ qb_attr_code_encode(&code_dqrr_tok_detect, &p[1], 0);
++
++ /* Only now do we convert from hardware to host endianness. Also, as we
++ * are returning success, the user has promised not to call us again, so
++ * there's no risk of us converting the endianness twice... */
++ make_le32_n(p, 16);
++ return 1;
++}
++
++int qbman_check_command_complete(struct qbman_swp *s,
++ const struct qbman_result *dq)
++{
++ /* To avoid converting the little-endian DQ entry to host-endian prior
++ * to us knowing whether there is a valid entry or not (and run the
++ * risk of corrupting the incoming hardware LE write), we detect in
++ * hardware endianness rather than host. This means we need a different
++ * "code" depending on whether we are BE or LE in software, which is
++ * where DQRR_TOK_OFFSET comes in... */
++ static struct qb_attr_code code_dqrr_tok_detect =
++ QB_CODE(0, DQRR_TOK_OFFSET, 8);
++ /* The user trying to poll for a result treats "dq" as const. It is
++ * however the same address that was provided to us non-const in the
++ * first place, for directing hardware DMA to. So we can cast away the
++ * const because it is mutable from our perspective. */
++ uint32_t *p = (uint32_t *)(unsigned long)qb_cl(dq);
++ uint32_t token;
++
++ token = qb_attr_code_decode(&code_dqrr_tok_detect, &p[1]);
++ if (token != 1)
++ return 0;
++ /* When the token is set it indicates that the VDQ command has been
++ * fetched by QBMan and is being processed. It is then safe for software
++ * to issue another VDQ command, so increment the busy count. */
++ if (s->vdq.storage == dq) {
++ s->vdq.storage = NULL;
++ atomic_inc(&s->vdq.busy);
++ }
++ return 1;
++}
++
++/********************************/
++/* Categorising qbman results */
++/********************************/
++
++static struct qb_attr_code code_result_in_mem =
++ QB_CODE(0, QBMAN_RESULT_VERB_OFFSET_IN_MEM, 7);
++
++static inline int __qbman_result_is_x(const struct qbman_result *dq,
++ uint32_t x)
++{
++ const uint32_t *p = qb_cl(dq);
++ uint32_t response_verb = qb_attr_code_decode(&code_dqrr_response, p);
++ return (response_verb == x);
++}
++
++static inline int __qbman_result_is_x_in_mem(const struct qbman_result *dq,
++ uint32_t x)
++{
++ const uint32_t *p = qb_cl(dq);
++ uint32_t response_verb = qb_attr_code_decode(&code_result_in_mem, p);
++
++ return (response_verb == x);
++}
++
++int qbman_result_is_DQ(const struct qbman_result *dq)
++{
++ return __qbman_result_is_x(dq, QBMAN_RESULT_DQ);
++}
++
++int qbman_result_is_FQDAN(const struct qbman_result *dq)
++{
++ return __qbman_result_is_x(dq, QBMAN_RESULT_FQDAN);
++}
++
++int qbman_result_is_CDAN(const struct qbman_result *dq)
++{
++ return __qbman_result_is_x(dq, QBMAN_RESULT_CDAN);
++}
++
++int qbman_result_is_CSCN(const struct qbman_result *dq)
++{
++ return __qbman_result_is_x_in_mem(dq, QBMAN_RESULT_CSCN_MEM) ||
++ __qbman_result_is_x(dq, QBMAN_RESULT_CSCN_WQ);
++}
++
++int qbman_result_is_BPSCN(const struct qbman_result *dq)
++{
++ return __qbman_result_is_x_in_mem(dq, QBMAN_RESULT_BPSCN);
++}
++
++int qbman_result_is_CGCU(const struct qbman_result *dq)
++{
++ return __qbman_result_is_x_in_mem(dq, QBMAN_RESULT_CGCU);
++}
++
++int qbman_result_is_FQRN(const struct qbman_result *dq)
++{
++ return __qbman_result_is_x_in_mem(dq, QBMAN_RESULT_FQRN);
++}
++
++int qbman_result_is_FQRNI(const struct qbman_result *dq)
++{
++ return __qbman_result_is_x_in_mem(dq, QBMAN_RESULT_FQRNI);
++}
++
++int qbman_result_is_FQPN(const struct qbman_result *dq)
++{
++ return __qbman_result_is_x(dq, QBMAN_RESULT_FQPN);
++}
++
++/*********************************/
++/* Parsing frame dequeue results */
++/*********************************/
++
++/* These APIs assume qbman_result_is_DQ() is TRUE */
++
++uint32_t qbman_result_DQ_flags(const struct qbman_result *dq)
++{
++ const uint32_t *p = qb_cl(dq);
++ return qb_attr_code_decode(&code_dqrr_stat, p);
++}
++
++uint16_t qbman_result_DQ_seqnum(const struct qbman_result *dq)
++{
++ const uint32_t *p = qb_cl(dq);
++ return (uint16_t)qb_attr_code_decode(&code_dqrr_seqnum, p);
++}
++
++uint16_t qbman_result_DQ_odpid(const struct qbman_result *dq)
++{
++ const uint32_t *p = qb_cl(dq);
++ return (uint16_t)qb_attr_code_decode(&code_dqrr_odpid, p);
++}
++
++uint32_t qbman_result_DQ_fqid(const struct qbman_result *dq)
++{
++ const uint32_t *p = qb_cl(dq);
++ return qb_attr_code_decode(&code_dqrr_fqid, p);
++}
++
++uint32_t qbman_result_DQ_byte_count(const struct qbman_result *dq)
++{
++ const uint32_t *p = qb_cl(dq);
++ return qb_attr_code_decode(&code_dqrr_byte_count, p);
++}
++
++uint32_t qbman_result_DQ_frame_count(const struct qbman_result *dq)
++{
++ const uint32_t *p = qb_cl(dq);
++ return qb_attr_code_decode(&code_dqrr_frame_count, p);
++}
++
++uint64_t qbman_result_DQ_fqd_ctx(const struct qbman_result *dq)
++{
++ const uint64_t *p = (const uint64_t *)qb_cl(dq);
++
++ return qb_attr_code_decode_64(&code_dqrr_ctx_lo, p);
++}
++
++const struct qbman_fd *qbman_result_DQ_fd(const struct qbman_result *dq)
++{
++ const uint32_t *p = qb_cl(dq);
++ return (const struct qbman_fd *)&p[8];
++}
++
++/**************************************/
++/* Parsing state-change notifications */
++/**************************************/
++
++static struct qb_attr_code code_scn_state = QB_CODE(0, 16, 8);
++static struct qb_attr_code code_scn_rid = QB_CODE(1, 0, 24);
++static struct qb_attr_code code_scn_state_in_mem =
++ QB_CODE(0, SCN_STATE_OFFSET_IN_MEM, 8);
++static struct qb_attr_code code_scn_rid_in_mem =
++ QB_CODE(1, SCN_RID_OFFSET_IN_MEM, 24);
++static struct qb_attr_code code_scn_ctx_lo = QB_CODE(2, 0, 32);
++
++uint8_t qbman_result_SCN_state(const struct qbman_result *scn)
++{
++ const uint32_t *p = qb_cl(scn);
++ return (uint8_t)qb_attr_code_decode(&code_scn_state, p);
++}
++
++uint32_t qbman_result_SCN_rid(const struct qbman_result *scn)
++{
++ const uint32_t *p = qb_cl(scn);
++ return qb_attr_code_decode(&code_scn_rid, p);
++}
++
++uint64_t qbman_result_SCN_ctx(const struct qbman_result *scn)
++{
++ const uint64_t *p = (const uint64_t *)qb_cl(scn);
++
++ return qb_attr_code_decode_64(&code_scn_ctx_lo, p);
++}
++
++uint8_t qbman_result_SCN_state_in_mem(const struct qbman_result *scn)
++{
++ const uint32_t *p = qb_cl(scn);
++
++ return (uint8_t)qb_attr_code_decode(&code_scn_state_in_mem, p);
++}
++
++uint32_t qbman_result_SCN_rid_in_mem(const struct qbman_result *scn)
++{
++ const uint32_t *p = qb_cl(scn);
++ uint32_t result_rid;
++
++ result_rid = qb_attr_code_decode(&code_scn_rid_in_mem, p);
++ return make_le24(result_rid);
++}
++
++/*****************/
++/* Parsing BPSCN */
++/*****************/
++uint16_t qbman_result_bpscn_bpid(const struct qbman_result *scn)
++{
++ return (uint16_t)qbman_result_SCN_rid_in_mem(scn) & 0x3FFF;
++}
++
++int qbman_result_bpscn_has_free_bufs(const struct qbman_result *scn)
++{
++ return !(int)(qbman_result_SCN_state_in_mem(scn) & 0x1);
++}
++
++int qbman_result_bpscn_is_depleted(const struct qbman_result *scn)
++{
++ return (int)(qbman_result_SCN_state_in_mem(scn) & 0x2);
++}
++
++int qbman_result_bpscn_is_surplus(const struct qbman_result *scn)
++{
++ return (int)(qbman_result_SCN_state_in_mem(scn) & 0x4);
++}
++
++uint64_t qbman_result_bpscn_ctx(const struct qbman_result *scn)
++{
++ uint64_t ctx;
++ uint32_t ctx_hi, ctx_lo;
++
++ ctx = qbman_result_SCN_ctx(scn);
++ ctx_hi = upper32(ctx);
++ ctx_lo = lower32(ctx);
++ return ((uint64_t)make_le32(ctx_hi) << 32 |
++ (uint64_t)make_le32(ctx_lo));
++}
++
++/*****************/
++/* Parsing CGCU */
++/*****************/
++uint16_t qbman_result_cgcu_cgid(const struct qbman_result *scn)
++{
++ return (uint16_t)qbman_result_SCN_rid_in_mem(scn) & 0xFFFF;
++}
++
++uint64_t qbman_result_cgcu_icnt(const struct qbman_result *scn)
++{
++ uint64_t ctx;
++ uint32_t ctx_hi, ctx_lo;
++
++ ctx = qbman_result_SCN_ctx(scn);
++ ctx_hi = upper32(ctx);
++ ctx_lo = lower32(ctx);
++ return ((uint64_t)(make_le32(ctx_hi) & 0xFF) << 32) |
++ (uint64_t)make_le32(ctx_lo);
++}
++
++/******************/
++/* Buffer release */
++/******************/
++
++/* These should be const, eventually */
++/* static struct qb_attr_code code_release_num = QB_CODE(0, 0, 3); */
++static struct qb_attr_code code_release_set_me = QB_CODE(0, 5, 1);
++static struct qb_attr_code code_release_rcdi = QB_CODE(0, 6, 1);
++static struct qb_attr_code code_release_bpid = QB_CODE(0, 16, 16);
++
++void qbman_release_desc_clear(struct qbman_release_desc *d)
++{
++ uint32_t *cl;
++ memset(d, 0, sizeof(*d));
++ cl = qb_cl(d);
++ qb_attr_code_encode(&code_release_set_me, cl, 1);
++}
++
++void qbman_release_desc_set_bpid(struct qbman_release_desc *d, uint32_t bpid)
++{
++ uint32_t *cl = qb_cl(d);
++ qb_attr_code_encode(&code_release_bpid, cl, bpid);
++}
++
++void qbman_release_desc_set_rcdi(struct qbman_release_desc *d, int enable)
++{
++ uint32_t *cl = qb_cl(d);
++ qb_attr_code_encode(&code_release_rcdi, cl, !!enable);
++}
++
++#define RAR_IDX(rar) ((rar) & 0x7)
++#define RAR_VB(rar) ((rar) & 0x80)
++#define RAR_SUCCESS(rar) ((rar) & 0x100)
++
++int qbman_swp_release(struct qbman_swp *s, const struct qbman_release_desc *d,
++ const uint64_t *buffers, unsigned int num_buffers)
++{
++ uint32_t *p;
++ const uint32_t *cl = qb_cl(d);
++ uint32_t rar = qbman_cinh_read(&s->sys, QBMAN_CINH_SWP_RAR);
++ pr_debug("RAR=%08x\n", rar);
++ if (!RAR_SUCCESS(rar))
++ return -EBUSY;
++ BUG_ON(!num_buffers || (num_buffers > 7));
++ /* Start the release command */
++ p = qbman_cena_write_start_wo_shadow(&s->sys,
++ QBMAN_CENA_SWP_RCR(RAR_IDX(rar)));
++ /* Copy the caller's buffer pointers to the command */
++ u64_to_le32_copy(&p[2], buffers, num_buffers);
++ /* Set the verb byte, have to substitute in the valid-bit and the number
++ * of buffers. */
++ lwsync();
++ p[0] = cl[0] | RAR_VB(rar) | num_buffers;
++ qbman_cena_write_complete_wo_shadow(&s->sys,
++ QBMAN_CENA_SWP_RCR(RAR_IDX(rar)));
++ return 0;
++}
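++
++/* Illustrative example (not part of the driver): releasing a batch of up to
++ * seven buffer addresses 'bufs[0..n-1]' back to buffer pool 'bpid', retrying
++ * while the release command register is busy; 'swp', 'bufs' and 'n' are
++ * assumed to be set up by the caller:
++ *
++ * struct qbman_release_desc rd;
++ *
++ * qbman_release_desc_clear(&rd);
++ * qbman_release_desc_set_bpid(&rd, bpid);
++ * while (qbman_swp_release(swp, &rd, bufs, n) == -EBUSY)
++ * ;
++ */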
++
++/*******************/
++/* Buffer acquires */
++/*******************/
++
++/* These should be const, eventually */
++static struct qb_attr_code code_acquire_bpid = QB_CODE(0, 16, 16);
++static struct qb_attr_code code_acquire_num = QB_CODE(1, 0, 3);
++static struct qb_attr_code code_acquire_r_num = QB_CODE(1, 0, 3);
++
++int qbman_swp_acquire(struct qbman_swp *s, uint32_t bpid, uint64_t *buffers,
++ unsigned int num_buffers)
++{
++ uint32_t *p;
++ uint32_t rslt, num;
++ BUG_ON(!num_buffers || (num_buffers > 7));
++
++ /* Start the management command */
++ p = qbman_swp_mc_start(s);
++
++ if (!p)
++ return -EBUSY;
++
++ /* Encode the caller-provided attributes */
++ qb_attr_code_encode(&code_acquire_bpid, p, bpid);
++ qb_attr_code_encode(&code_acquire_num, p, num_buffers);
++
++ /* Complete the management command */
++ p = qbman_swp_mc_complete(s, p, p[0] | QBMAN_MC_ACQUIRE);
++
++ /* Decode the outcome */
++ rslt = qb_attr_code_decode(&code_generic_rslt, p);
++ num = qb_attr_code_decode(&code_acquire_r_num, p);
++ BUG_ON(qb_attr_code_decode(&code_generic_verb, p) != QBMAN_MC_ACQUIRE);
++
++ /* Determine success or failure */
++ if (unlikely(rslt != QBMAN_MC_RSLT_OK)) {
++ pr_err("Acquire buffers from BPID 0x%x failed, code=0x%02x\n",
++ bpid, rslt);
++ return -EIO;
++ }
++ BUG_ON(num > num_buffers);
++ /* Copy the acquired buffers to the caller's array */
++ u64_from_le32_copy(buffers, &p[2], num);
++ return (int)num;
++}
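++
++/* Illustrative example (not part of the driver): draining up to seven
++ * buffers from pool 'bpid' in one command. The return value is the number of
++ * buffers actually acquired, so n <= 0 means the pool is empty or the
++ * command failed:
++ *
++ * uint64_t bufs[7];
++ * int n = qbman_swp_acquire(swp, bpid, bufs, 7);
++ */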
++
++/*****************/
++/* FQ management */
++/*****************/
++
++static struct qb_attr_code code_fqalt_fqid = QB_CODE(1, 0, 32);
++
++static int qbman_swp_alt_fq_state(struct qbman_swp *s, uint32_t fqid,
++ uint8_t alt_fq_verb)
++{
++ uint32_t *p;
++ uint32_t rslt;
++
++ /* Start the management command */
++ p = qbman_swp_mc_start(s);
++ if (!p)
++ return -EBUSY;
++
++ qb_attr_code_encode(&code_fqalt_fqid, p, fqid);
++ /* Complete the management command */
++ p = qbman_swp_mc_complete(s, p, p[0] | alt_fq_verb);
++
++ /* Decode the outcome */
++ rslt = qb_attr_code_decode(&code_generic_rslt, p);
++ BUG_ON(qb_attr_code_decode(&code_generic_verb, p) != alt_fq_verb);
++
++ /* Determine success or failure */
++ if (unlikely(rslt != QBMAN_MC_RSLT_OK)) {
++ pr_err("ALT FQID %d failed: verb = 0x%08x, code = 0x%02x\n",
++ fqid, alt_fq_verb, rslt);
++ return -EIO;
++ }
++
++ return 0;
++}
++
++int qbman_swp_fq_schedule(struct qbman_swp *s, uint32_t fqid)
++{
++ return qbman_swp_alt_fq_state(s, fqid, QBMAN_FQ_SCHEDULE);
++}
++
++int qbman_swp_fq_force(struct qbman_swp *s, uint32_t fqid)
++{
++ return qbman_swp_alt_fq_state(s, fqid, QBMAN_FQ_FORCE);
++}
++
++int qbman_swp_fq_xon(struct qbman_swp *s, uint32_t fqid)
++{
++ return qbman_swp_alt_fq_state(s, fqid, QBMAN_FQ_XON);
++}
++
++int qbman_swp_fq_xoff(struct qbman_swp *s, uint32_t fqid)
++{
++ return qbman_swp_alt_fq_state(s, fqid, QBMAN_FQ_XOFF);
++}
++
++/**********************/
++/* Channel management */
++/**********************/
++
++static struct qb_attr_code code_cdan_cid = QB_CODE(0, 16, 12);
++static struct qb_attr_code code_cdan_we = QB_CODE(1, 0, 8);
++static struct qb_attr_code code_cdan_en = QB_CODE(1, 8, 1);
++static struct qb_attr_code code_cdan_ctx_lo = QB_CODE(2, 0, 32);
++
++/* Hide "ICD" for now as we don't use it, don't set it, and don't test it, so it
++ * would be irresponsible to expose it. */
++#define CODE_CDAN_WE_EN 0x1
++#define CODE_CDAN_WE_CTX 0x4
++
++static int qbman_swp_CDAN_set(struct qbman_swp *s, uint16_t channelid,
++ uint8_t we_mask, uint8_t cdan_en,
++ uint64_t ctx)
++{
++ uint32_t *p;
++ uint32_t rslt;
++
++ /* Start the management command */
++ p = qbman_swp_mc_start(s);
++ if (!p)
++ return -EBUSY;
++
++ /* Encode the caller-provided attributes */
++ qb_attr_code_encode(&code_cdan_cid, p, channelid);
++ qb_attr_code_encode(&code_cdan_we, p, we_mask);
++ qb_attr_code_encode(&code_cdan_en, p, cdan_en);
++ qb_attr_code_encode_64(&code_cdan_ctx_lo, (uint64_t *)p, ctx);
++ /* Complete the management command */
++ p = qbman_swp_mc_complete(s, p, p[0] | QBMAN_WQCHAN_CONFIGURE);
++
++ /* Decode the outcome */
++ rslt = qb_attr_code_decode(&code_generic_rslt, p);
++ BUG_ON(qb_attr_code_decode(&code_generic_verb, p)
++ != QBMAN_WQCHAN_CONFIGURE);
++
++ /* Determine success or failure */
++ if (unlikely(rslt != QBMAN_MC_RSLT_OK)) {
++ pr_err("CDAN cQID %d failed: code = 0x%02x\n",
++ channelid, rslt);
++ return -EIO;
++ }
++
++ return 0;
++}
++
++int qbman_swp_CDAN_set_context(struct qbman_swp *s, uint16_t channelid,
++ uint64_t ctx)
++{
++ return qbman_swp_CDAN_set(s, channelid,
++ CODE_CDAN_WE_CTX,
++ 0, ctx);
++}
++
++int qbman_swp_CDAN_enable(struct qbman_swp *s, uint16_t channelid)
++{
++ return qbman_swp_CDAN_set(s, channelid,
++ CODE_CDAN_WE_EN,
++ 1, 0);
++}
++
++int qbman_swp_CDAN_disable(struct qbman_swp *s, uint16_t channelid)
++{
++ return qbman_swp_CDAN_set(s, channelid,
++ CODE_CDAN_WE_EN,
++ 0, 0);
++}
++
++int qbman_swp_CDAN_set_context_enable(struct qbman_swp *s, uint16_t channelid,
++ uint64_t ctx)
++{
++ return qbman_swp_CDAN_set(s, channelid,
++ CODE_CDAN_WE_EN | CODE_CDAN_WE_CTX,
++ 1, ctx);
++}
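++
++/* Illustrative example (not part of the driver): arming a channel data
++ * availability notification (CDAN) on channel 'chid', with a caller-chosen
++ * 64-bit context value 'ctx' that identifies the channel when the
++ * notification is delivered; 'swp', 'chid' and 'ctx' are assumed to be set
++ * up elsewhere:
++ *
++ * if (qbman_swp_CDAN_set_context_enable(swp, chid, ctx))
++ * pr_err("Failed to enable CDAN on channel %d\n", chid);
++ */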
++
++uint8_t qbman_get_dqrr_idx(struct qbman_result *dqrr)
++{
++ return QBMAN_IDX_FROM_DQRR(dqrr);
++}
++
++struct qbman_result *qbman_get_dqrr_from_idx(struct qbman_swp *s, uint8_t idx)
++{
++ struct qbman_result *dq;
++ dq = qbman_cena_read(&s->sys, QBMAN_CENA_SWP_DQRR(idx));
++ return dq;
++}
++
++int qbman_swp_send_multiple(struct qbman_swp *s,
++ const struct qbman_eq_desc *d,
++ const struct qbman_fd *fd,
++ int frames_to_send)
++{
++ uint32_t *p;
++ const uint32_t *cl = qb_cl(d);
++ uint32_t eqcr_ci;
++ uint8_t diff;
++ int sent = 0;
++ int i;
++ int initial_pi = s->eqcr.pi;
++ uint64_t start_pointer;
++
++ /* Try to send frames_to_send frames, for as long as there is enough
++ * space in the ring */
++ while (frames_to_send--) {
++ if (!s->eqcr.available) {
++ eqcr_ci = s->eqcr.ci;
++ s->eqcr.ci = qbman_cena_read_reg(&s->sys,
++ QBMAN_CENA_SWP_EQCR_CI) & 0xF;
++ diff = qm_cyc_diff(QBMAN_EQCR_SIZE,
++ eqcr_ci, s->eqcr.ci);
++ s->eqcr.available += diff;
++ if (!diff)
++ goto done;
++ }
++
++ p = qbman_cena_write_start_wo_shadow_fast(&s->sys,
++ QBMAN_CENA_SWP_EQCR((initial_pi) & 7));
++ /* Write the command (except the verb word) and the FD */
++ memcpy(&p[1], &cl[1], 7 * sizeof(uint32_t));
++ memcpy(&p[8], &fd[sent], sizeof(struct qbman_fd));
++
++ initial_pi++;
++ initial_pi &= 0xF;
++ s->eqcr.available--;
++ sent++;
++
++ }
++
++ done:
++ lwsync();
++
++ /* Set the verb bytes of all the queued entries first, then flush their
++ * cache lines in a separate pass, so that the flushes can complete
++ * back-to-back without loads or stores in between. */
++ initial_pi = s->eqcr.pi;
++ for (i = 0; i < sent; i++) {
++ p = qbman_cena_write_start_wo_shadow_fast(&s->sys,
++ QBMAN_CENA_SWP_EQCR((initial_pi) & 7));
++
++ p[0] = cl[0] | s->eqcr.pi_vb;
++ initial_pi++;
++ initial_pi &= 0xF;
++
++ if (!(initial_pi & 7))
++ s->eqcr.pi_vb ^= QB_VALID_BIT;
++
++ }
++
++ initial_pi = s->eqcr.pi;
++
++ /* Flush all the lines, with no load/store operations in between.
++ * Read start_pointer once before the loop so it is not re-fetched
++ * from memory on every iteration. */
++ start_pointer = (uint64_t) s->sys.addr_cena;
++ for (i = 0; i < sent; i++) {
++ p = (uint32_t *)(start_pointer + QBMAN_CENA_SWP_EQCR(initial_pi & 7));
++ dcbf((uint64_t)p);
++ initial_pi++;
++ initial_pi &= 0xF;
++ }
++
++ /* Update producer index for the next call */
++ s->eqcr.pi = initial_pi;
++
++ return sent;
++}
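++
++/* Illustrative example (not part of the driver): a burst-transmit loop that
++ * keeps calling qbman_swp_send_multiple() until every frame in 'fds' has
++ * been accepted by the ring; 'swp', the enqueue descriptor 'd' and the array
++ * of 'count' frame descriptors 'fds' are assumed to be prepared by the
++ * caller:
++ *
++ * int sent = 0;
++ *
++ * while (sent < count)
++ * sent += qbman_swp_send_multiple(swp, d, &fds[sent], count - sent);
++ */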
+diff --git a/drivers/net/dpaa2/qbman/driver/qbman_portal.h b/drivers/net/dpaa2/qbman/driver/qbman_portal.h
+new file mode 100644
+index 0000000..f6ba86a
+--- /dev/null
++++ b/drivers/net/dpaa2/qbman/driver/qbman_portal.h
+@@ -0,0 +1,266 @@
++/* Copyright (C) 2014 Freescale Semiconductor, Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of Freescale Semiconductor nor the
++ * names of its contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++#include "qbman_private.h"
++#include <drivers/fsl_qbman_portal.h>
++
++uint32_t qman_version;
++/* All QBMan command and result structures use this "valid bit" encoding */
++#define QB_VALID_BIT ((uint32_t)0x80)
++
++/* Management command result codes */
++#define QBMAN_MC_RSLT_OK 0xf0
++
++/* TBD: as of QBMan 4.1, DQRR will be 8 rather than 4! */
++#define QBMAN_DQRR_SIZE 4
++
++#define QBMAN_EQCR_SIZE 8
++
++static inline u8 qm_cyc_diff(u8 ringsize, u8 first, u8 last)
++{
++ /* 'first' is included, 'last' is excluded */
++ if (first <= last)
++ return last - first;
++ return (2 * ringsize) + last - first;
++}
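++
++/* For example, with QBMAN_EQCR_SIZE == 8 the producer/consumer indices run
++ * over 0..15 (twice the ring size, so that full and empty rings can be told
++ * apart), and qm_cyc_diff(8, 14, 2) == 4: four entries were consumed between
++ * the two index snapshots. */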
++
++/* --------------------- */
++/* portal data structure */
++/* --------------------- */
++
++struct qbman_swp {
++ const struct qbman_swp_desc *desc;
++ /* The qbman_sys (ie. arch/OS-specific) support code can put anything it
++ * needs in here. */
++ struct qbman_swp_sys sys;
++ /* Management commands */
++ struct {
++#ifdef QBMAN_CHECKING
++ enum swp_mc_check {
++ swp_mc_can_start, /* call __qbman_swp_mc_start() */
++ swp_mc_can_submit, /* call __qbman_swp_mc_submit() */
++ swp_mc_can_poll, /* call __qbman_swp_mc_result() */
++ } check;
++#endif
++ uint32_t valid_bit; /* 0x00 or 0x80 */
++ } mc;
++ /* Push dequeues */
++ uint32_t sdq;
++ /* Volatile dequeues */
++ struct {
++ /* VDQCR supports a "1 deep pipeline", meaning that if you know
++ * the last-submitted command is already executing in the
++ * hardware (as evidenced by at least 1 valid dequeue result),
++ * you can write another dequeue command to the register, the
++ * hardware will start executing it as soon as the
++ * already-executing command terminates. (This minimises latency
++ * and stalls.) With that in mind, this "busy" variable refers
++ * to whether or not a command can be submitted, not whether or
++ * not a previously-submitted command is still executing. In
++ * other words, once proof is seen that the previously-submitted
++ * command is executing, "vdq" is no longer "busy". */
++ atomic_t busy;
++ uint32_t valid_bit; /* 0x00 or 0x80 */
++ /* We need to determine when vdq is no longer busy. This depends
++ * on whether the "busy" (last-submitted) dequeue command is
++ * targeting DQRR or main-memory, and detection is based on the
++ * presence of the dequeue command's "token" showing up in
++ * dequeue entries in DQRR or main-memory (respectively). */
++ struct qbman_result *storage; /* NULL if DQRR */
++ } vdq;
++ /* DQRR */
++ struct {
++ uint32_t next_idx;
++ uint32_t valid_bit;
++ uint8_t dqrr_size;
++ int reset_bug;
++ } dqrr;
++ struct {
++ uint32_t pi;
++ uint32_t pi_vb;
++ uint32_t ci;
++ int available;
++ } eqcr;
++};
++
++/* -------------------------- */
++/* portal management commands */
++/* -------------------------- */
++
++/* Different management commands all use this common base layer of code to issue
++ * commands and poll for results. The first function returns a pointer to where
++ * the caller should fill in their MC command (though they should ignore the
++ * verb byte), the second function merges in the caller-supplied command
++ * verb (which should not include the valid-bit) and submits the command to
++ * hardware, and the third function checks for a completed response (returns
++ * non-NULL only if the response is complete). */
++void *qbman_swp_mc_start(struct qbman_swp *p);
++void qbman_swp_mc_submit(struct qbman_swp *p, void *cmd, uint32_t cmd_verb);
++void *qbman_swp_mc_result(struct qbman_swp *p);
++
++/* Wraps up submit + poll-for-result */
++static inline void *qbman_swp_mc_complete(struct qbman_swp *swp, void *cmd,
++ uint32_t cmd_verb)
++{
++ int loopvar;
++ qbman_swp_mc_submit(swp, cmd, cmd_verb);
++ DBG_POLL_START(loopvar);
++ do {
++ DBG_POLL_CHECK(loopvar);
++ cmd = qbman_swp_mc_result(swp);
++ } while (!cmd);
++ return cmd;
++}
++
++/* ------------ */
++/* qb_attr_code */
++/* ------------ */
++
++/* This struct locates a sub-field within a QBMan portal (CENA) cacheline which
++ * is either serving as a configuration command or a query result. The
++ * representation is inherently little-endian, as the indexing of the words is
++ * itself little-endian in nature and DPAA2 QBMan is little endian for anything
++ * that crosses a word boundary too (64-bit fields are the obvious examples).
++ */
++struct qb_attr_code {
++ unsigned int word; /* which uint32_t[] array member encodes the field */
++ unsigned int lsoffset; /* encoding offset from ls-bit */
++ unsigned int width; /* encoding width. (bool must be 1.) */
++};
++
++/* Some pre-defined codes */
++extern struct qb_attr_code code_generic_verb;
++extern struct qb_attr_code code_generic_rslt;
++
++/* Macros to define codes */
++#define QB_CODE(a, b, c) { a, b, c}
++#define QB_CODE_NULL \
++ QB_CODE((unsigned int)-1, (unsigned int)-1, (unsigned int)-1)
++
++/* Rotate a code "ms", meaning that it moves from less-significant bytes to
++ * more-significant, from less-significant words to more-significant, etc. The
++ * "ls" version does the inverse, from more-significant towards
++ * less-significant.
++ */
++static inline void qb_attr_code_rotate_ms(struct qb_attr_code *code,
++ unsigned int bits)
++{
++ code->lsoffset += bits;
++ while (code->lsoffset > 31) {
++ code->word++;
++ code->lsoffset -= 32;
++ }
++}
++static inline void qb_attr_code_rotate_ls(struct qb_attr_code *code,
++ unsigned int bits)
++{
++ /* Don't be fooled, this trick should work because the types are
++ * unsigned. So the case that interests the while loop (the rotate has
++ * gone too far and the word count needs to compensate for it), is
++ * manifested when lsoffset is negative. But that equates to a really
++ * large unsigned value, starting with lots of "F"s. As such, we can
++ * continue adding 32 back to it until it wraps back round above zero,
++ * to a value of 31 or less...
++ */
++ code->lsoffset -= bits;
++ while (code->lsoffset > 31) {
++ code->word--;
++ code->lsoffset += 32;
++ }
++}
++/* Implement a loop of code rotations until 'expr' evaluates to FALSE (0). */
++#define qb_attr_code_for_ms(code, bits, expr) \
++ for (; expr; qb_attr_code_rotate_ms(code, bits))
++#define qb_attr_code_for_ls(code, bits, expr) \
++ for (; expr; qb_attr_code_rotate_ls(code, bits))
++
++/* decode a field from a cacheline */
++static inline uint32_t qb_attr_code_decode(const struct qb_attr_code *code,
++ const uint32_t *cacheline)
++{
++ return d32_uint32_t(code->lsoffset, code->width, cacheline[code->word]);
++}
++static inline uint64_t qb_attr_code_decode_64(const struct qb_attr_code *code,
++ const uint64_t *cacheline)
++{
++ return cacheline[code->word / 2];
++}
++
++/* encode a field to a cacheline */
++static inline void qb_attr_code_encode(const struct qb_attr_code *code,
++ uint32_t *cacheline, uint32_t val)
++{
++ cacheline[code->word] =
++ r32_uint32_t(code->lsoffset, code->width, cacheline[code->word])
++ | e32_uint32_t(code->lsoffset, code->width, val);
++}
++static inline void qb_attr_code_encode_64(const struct qb_attr_code *code,
++ uint64_t *cacheline, uint64_t val)
++{
++ cacheline[code->word / 2] = val;
++}
++
++/* Small-width signed values (two's-complement) will decode into medium-width
++ * positives. (Eg. for an 8-bit signed field, which stores values from -128 to
++ * +127, a setting of -7 would appear to decode to the 32-bit unsigned value
++ * 249. Likewise -120 would decode as 136.) This function allows the caller to
++ * "re-sign" such fields to 32-bit signed. (Eg. -7, which was 249 with an 8-bit
++ * encoding, will become 0xfffffff9 if you cast the return value to uint32_t).
++ */
++static inline int32_t qb_attr_code_makesigned(const struct qb_attr_code *code,
++ uint32_t val)
++{
++ BUG_ON(val >= (1u << code->width));
++ /* code->width should never exceed the width of val. If it does then a
++ * different function with larger val size must be used to translate
++ * from unsigned to signed */
++ BUG_ON(code->width > sizeof(val) * CHAR_BIT);
++ /* If the high bit was set, it was encoding a negative */
++ if (val >= 1u << (code->width - 1))
++ return (int32_t)0 - (int32_t)(((uint32_t)1 << code->width) -
++ val);
++ /* Otherwise, it was encoding a positive */
++ return (int32_t)val;
++}
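++
++/* Illustrative example (not part of the driver): a qb_attr_code names a
++ * (word, lsoffset, width) sub-field of a cacheline viewed as an array of
++ * uint32_t words. A hypothetical 16-bit buffer-pool id held in bits 16..31
++ * of word 0 would be written and read back as:
++ *
++ * struct qb_attr_code code = QB_CODE(0, 16, 16);
++ * uint32_t cl[16] = { 0 };
++ *
++ * qb_attr_code_encode(&code, cl, bpid);
++ * bpid = qb_attr_code_decode(&code, cl);
++ */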
++
++/* ---------------------- */
++/* Descriptors/cachelines */
++/* ---------------------- */
++
++/* To avoid needless dynamic allocation, the driver API often gives the caller
++ * a "descriptor" type that the caller can instantiate however they like.
++ * Ultimately though, it is just a cacheline of binary storage (or something
++ * smaller when it is known that the descriptor doesn't need all 64 bytes) for
++ * holding pre-formatted pieces of hardware commands. The performance-critical
++ * code can then copy these descriptors directly into hardware command
++ * registers more efficiently than trying to construct/format commands
++ * on-the-fly. The API user sees the descriptor as an array of 32-bit words in
++ * order for the compiler to know its size, but the internal details are not
++ * exposed. The following macro is used within the driver for converting *any*
++ * descriptor pointer to a usable array pointer. The use of a macro (instead of
++ * an inline) is necessary to work with different descriptor types and to work
++ * correctly with const and non-const inputs (and similarly-qualified outputs).
++ */
++#define qb_cl(d) (&(d)->dont_manipulate_directly[0])
+diff --git a/drivers/net/dpaa2/qbman/driver/qbman_private.h b/drivers/net/dpaa2/qbman/driver/qbman_private.h
+new file mode 100644
+index 0000000..4e50b61
+--- /dev/null
++++ b/drivers/net/dpaa2/qbman/driver/qbman_private.h
+@@ -0,0 +1,165 @@
++/* Copyright (C) 2014 Freescale Semiconductor, Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of Freescale Semiconductor nor the
++ * names of its contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++*/
++
++/* Perform extra checking */
++#define QBMAN_CHECKING
++
++/* To maximise the amount of logic that is common between the Linux driver and
++ * other targets (such as the embedded MC firmware), we pivot here between the
++ * inclusion of two platform-specific headers.
++ *
++ * The first, qbman_sys_decl.h, includes any and all required system headers as
++ * well as providing any definitions for the purposes of compatibility. The
++ * second, qbman_sys.h, is where platform-specific routines go.
++ *
++ * The point of the split is that the platform-independent code (including this
++ * header) may depend on platform-specific declarations, yet other
++ * platform-specific routines may depend on platform-independent definitions.
++ */
++
++#include "qbman_sys_decl.h"
++
++/* When things go wrong, it is a convenient trick to insert a few FOO()
++ * statements in the code to trace progress. TODO: remove this once we are
++ * hacking the code less actively.
++ */
++#define FOO() fsl_os_print("FOO: %s:%d\n", __FILE__, __LINE__)
++
++/* Any time there is a register interface which we poll on, this provides a
++ * "break after x iterations" scheme for it. It's handy for debugging, eg.
++ * where you don't want millions of lines of log output from a polling loop
++ * that won't, because such things tend to drown out the earlier log output
++ * that might explain what caused the problem. (NB: put ";" after each macro!)
++ * TODO: we should probably remove this once we're done sanitising the
++ * simulator...
++ */
++#define DBG_POLL_START(loopvar) (loopvar = 10)
++#define DBG_POLL_CHECK(loopvar) \
++ do {if (!(loopvar--)) BUG_ON(NULL == "DBG_POLL_CHECK"); } while (0)
++
++/* For CCSR or portal-CINH registers that contain fields at arbitrary offsets
++ * and widths, these macro-generated encode/decode/isolate/remove inlines can
++ * be used.
++ *
++ * Eg. to "d"ecode a 14-bit field out of a register (into a "uint16_t" type),
++ * where the field is located 3 bits "up" from the least-significant bit of the
++ * register (ie. the field location within the 32-bit register corresponds to a
++ * mask of 0x0001fff8), you would do;
++ * uint16_t field = d32_uint16_t(3, 14, reg_value);
++ *
++ * Or to "e"ncode a 1-bit boolean value (input type is "int", zero is FALSE,
++ * non-zero is TRUE, so must convert all non-zero inputs to 1, hence the "!!"
++ * operator) into a register at bit location 0x00080000 (19 bits "in" from the
++ * LS bit), do;
++ * reg_value |= e32_int(19, 1, !!field);
++ *
++ * If you wish to read-modify-write a register, such that you leave the 14-bit
++ * field as-is but have all other fields set to zero, then "i"solate the 14-bit
++ * value using;
++ * reg_value = i32_uint16_t(3, 14, reg_value);
++ *
++ * Alternatively, you could "r"emove the 1-bit boolean field (setting it to
++ * zero) but leaving all other fields as-is;
++ * reg_val = r32_int(19, 1, reg_value);
++ *
++ */
++#define MAKE_MASK32(width) (width == 32 ? 0xffffffff : \
++ (uint32_t)((1 << width) - 1))
++#define DECLARE_CODEC32(t) \
++static inline uint32_t e32_##t(uint32_t lsoffset, uint32_t width, t val) \
++{ \
++ BUG_ON(width > (sizeof(t) * 8)); \
++ return ((uint32_t)val & MAKE_MASK32(width)) << lsoffset; \
++} \
++static inline t d32_##t(uint32_t lsoffset, uint32_t width, uint32_t val) \
++{ \
++ BUG_ON(width > (sizeof(t) * 8)); \
++ return (t)((val >> lsoffset) & MAKE_MASK32(width)); \
++} \
++static inline uint32_t i32_##t(uint32_t lsoffset, uint32_t width, \
++ uint32_t val) \
++{ \
++ BUG_ON(width > (sizeof(t) * 8)); \
++ return e32_##t(lsoffset, width, d32_##t(lsoffset, width, val)); \
++} \
++static inline uint32_t r32_##t(uint32_t lsoffset, uint32_t width, \
++ uint32_t val) \
++{ \
++ BUG_ON(width > (sizeof(t) * 8)); \
++ return ~(MAKE_MASK32(width) << lsoffset) & val; \
++}
++DECLARE_CODEC32(uint32_t)
++DECLARE_CODEC32(uint16_t)
++DECLARE_CODEC32(uint8_t)
++DECLARE_CODEC32(int)
++
++ /*********************/
++ /* Debugging assists */
++ /*********************/
++
++static inline void __hexdump(unsigned long start, unsigned long end,
++ unsigned long p, size_t sz, const unsigned char *c)
++{
++ while (start < end) {
++ unsigned int pos = 0;
++ char buf[64];
++ int nl = 0;
++ pos += sprintf(buf + pos, "%08lx: ", start);
++ do {
++ if ((start < p) || (start >= (p + sz)))
++ pos += sprintf(buf + pos, "..");
++ else
++ pos += sprintf(buf + pos, "%02x", *(c++));
++ if (!(++start & 15)) {
++ buf[pos++] = '\n';
++ nl = 1;
++ } else {
++ nl = 0;
++ if (!(start & 1))
++ buf[pos++] = ' ';
++ if (!(start & 3))
++ buf[pos++] = ' ';
++ }
++ } while (start & 15);
++ if (!nl)
++ buf[pos++] = '\n';
++ buf[pos] = '\0';
++ pr_info("%s", buf);
++ }
++}
++static inline void hexdump(const void *ptr, size_t sz)
++{
++ unsigned long p = (unsigned long)ptr;
++ unsigned long start = p & ~(unsigned long)15;
++ unsigned long end = (p + sz + 15) & ~(unsigned long)15;
++ const unsigned char *c = ptr;
++ __hexdump(start, end, p, sz, c);
++}
++
++#define QMAN_REV_4000 0x04000000
++#define QMAN_REV_4100 0x04010000
++#define QMAN_REV_4101 0x04010001
++
++#include "qbman_sys.h"
+diff --git a/drivers/net/dpaa2/qbman/driver/qbman_sys.h b/drivers/net/dpaa2/qbman/driver/qbman_sys.h
+new file mode 100644
+index 0000000..d912ab0
+--- /dev/null
++++ b/drivers/net/dpaa2/qbman/driver/qbman_sys.h
+@@ -0,0 +1,367 @@
++/* Copyright (C) 2014 Freescale Semiconductor, Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of Freescale Semiconductor nor the
++ * names of its contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++/* qbman_sys_decl.h and qbman_sys.h are the two platform-specific files in the
++ * driver. They are only included via qbman_private.h, which is itself a
++ * platform-independent file and is included by all the other driver source.
++ *
++ * qbman_sys_decl.h is included prior to all other declarations and logic, and
++ * it exists to provide compatibility with any linux interfaces our
++ * single-source driver code is dependent on (eg. kmalloc). Ie. this file
++ * provides linux compatibility.
++ *
++ * This qbman_sys.h header, on the other hand, is included *after* any common
++ * and platform-neutral declarations and logic in qbman_private.h, and exists to
++ * implement any platform-specific logic of the qbman driver itself. Ie. it is
++ * *not* to provide linux compatibility.
++ */
++
++/* Trace the 3 different classes of read/write access to QBMan. #undef as
++ * required. */
++#undef QBMAN_CCSR_TRACE
++#undef QBMAN_CINH_TRACE
++#undef QBMAN_CENA_TRACE
++
++static inline void word_copy(void *d, const void *s, unsigned int cnt)
++{
++ uint32_t *dd = d;
++ const uint32_t *ss = s;
++ while (cnt--)
++ *(dd++) = *(ss++);
++}
++
++/* Currently, the CENA support code expects each 32-bit word to be written in
++ * host order, and these are converted to hardware (little-endian) order on
++ * command submission. However, 64-bit quantities must be written (and read)
++ * as two 32-bit words with the least-significant word first, irrespective of
++ * host endianness. */
++static inline void u64_to_le32_copy(void *d, const uint64_t *s,
++ unsigned int cnt)
++{
++ uint32_t *dd = d;
++ const uint32_t *ss = (const uint32_t *)s;
++ while (cnt--) {
++ /* TBD: the toolchain was choking on the use of 64-bit types up
++ * until recently so this works entirely with 32-bit variables.
++ * When 64-bit types become usable again, investigate better
++ * ways of doing this. */
++#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
++ *(dd++) = ss[1];
++ *(dd++) = ss[0];
++ ss += 2;
++#else
++ *(dd++) = *(ss++);
++ *(dd++) = *(ss++);
++#endif
++ }
++}
++static inline void u64_from_le32_copy(uint64_t *d, const void *s,
++ unsigned int cnt)
++{
++ const uint32_t *ss = s;
++ uint32_t *dd = (uint32_t *)d;
++ while (cnt--) {
++#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
++ dd[1] = *(ss++);
++ dd[0] = *(ss++);
++ dd += 2;
++#else
++ *(dd++) = *(ss++);
++ *(dd++) = *(ss++);
++#endif
++ }
++}
++
++/* Convert a host-native 32bit value into little endian */
++#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
++static inline uint32_t make_le32(uint32_t val)
++{
++ return ((val & 0xff) << 24) | ((val & 0xff00) << 8) |
++ ((val & 0xff0000) >> 8) | ((val & 0xff000000) >> 24);
++}
++static inline uint32_t make_le24(uint32_t val)
++{
++ return (((val & 0xff) << 16) | (val & 0xff00) |
++ ((val & 0xff0000) >> 16));
++}
++#else
++#define make_le32(val) (val)
++#define make_le24(val) (val)
++#endif
++static inline void make_le32_n(uint32_t *val, unsigned int num)
++{
++ while (num--) {
++ *val = make_le32(*val);
++ val++;
++ }
++}
++
++ /******************/
++ /* Portal access */
++ /******************/
++struct qbman_swp_sys {
++ /* On GPP, the sys support for qbman_swp is here. The CENA region is
++ * not an mmap() of the real portal registers, but an allocated
++ * place-holder, because the actual writes/reads to/from the portal are
++ * marshalled from these allocated areas using QBMan's "MC access
++ * registers". CINH accesses are atomic so there's no need for a
++ * place-holder. */
++ uint8_t *cena;
++ uint8_t __iomem *addr_cena;
++ uint8_t __iomem *addr_cinh;
++ uint32_t idx;
++ enum qbman_eqcr_mode eqcr_mode;
++};
++
++/* P_OFFSET is (ACCESS_CMD,0,12) - offset within the portal
++ * C is (ACCESS_CMD,12,1) - is inhibited? (0==CENA, 1==CINH)
++ * SWP_IDX is (ACCESS_CMD,16,10) - Software portal index
++ * P is (ACCESS_CMD,28,1) - (0==special portal, 1==any portal)
++ * T is (ACCESS_CMD,29,1) - Command type (0==READ, 1==WRITE)
++ * E is (ACCESS_CMD,31,1) - Command execute (1 to issue, poll for 0==complete)
++ */
++
++static inline void qbman_cinh_write(struct qbman_swp_sys *s, uint32_t offset,
++ uint32_t val)
++{
++
++ __raw_writel(val, s->addr_cinh + offset);
++#ifdef QBMAN_CINH_TRACE
++ pr_info("qbman_cinh_write(%p:%d:0x%03x) 0x%08x\n",
++ s->addr_cinh, s->idx, offset, val);
++#endif
++}
++
++static inline uint32_t qbman_cinh_read(struct qbman_swp_sys *s, uint32_t offset)
++{
++ uint32_t reg = __raw_readl(s->addr_cinh + offset);
++#ifdef QBMAN_CINH_TRACE
++ pr_info("qbman_cinh_read(%p:%d:0x%03x) 0x%08x\n",
++ s->addr_cinh, s->idx, offset, reg);
++#endif
++ return reg;
++}
++
++static inline void *qbman_cena_write_start(struct qbman_swp_sys *s,
++ uint32_t offset)
++{
++ void *shadow = s->cena + offset;
++
++#ifdef QBMAN_CENA_TRACE
++ pr_info("qbman_cena_write_start(%p:%d:0x%03x) %p\n",
++ s->addr_cena, s->idx, offset, shadow);
++#endif
++ BUG_ON(offset & 63);
++ dcbz(shadow);
++ return shadow;
++}
++
++static inline void *qbman_cena_write_start_wo_shadow(struct qbman_swp_sys *s,
++ uint32_t offset)
++{
++#ifdef QBMAN_CENA_TRACE
++ pr_info("qbman_cena_write_start(%p:%d:0x%03x)\n",
++ s->addr_cena, s->idx, offset);
++#endif
++ BUG_ON(offset & 63);
++ return (s->addr_cena + offset);
++}
++
++static inline void qbman_cena_write_complete(struct qbman_swp_sys *s,
++ uint32_t offset, void *cmd)
++{
++ const uint32_t *shadow = cmd;
++ int loop;
++#ifdef QBMAN_CENA_TRACE
++ pr_info("qbman_cena_write_complete(%p:%d:0x%03x) %p\n",
++ s->addr_cena, s->idx, offset, shadow);
++ hexdump(cmd, 64);
++#endif
++ for (loop = 15; loop >= 1; loop--)
++ __raw_writel(shadow[loop], s->addr_cena +
++ offset + loop * 4);
++ lwsync();
++ __raw_writel(shadow[0], s->addr_cena + offset);
++ dcbf(s->addr_cena + offset);
++}
++
++static inline void qbman_cena_write_complete_wo_shadow(struct qbman_swp_sys *s,
++ uint32_t offset)
++{
++#ifdef QBMAN_CENA_TRACE
++ pr_info("qbman_cena_write_complete(%p:%d:0x%03x)\n",
++ s->addr_cena, s->idx, offset);
++ hexdump(s->addr_cena + offset, 64);
++#endif
++ dcbf(s->addr_cena + offset);
++}
++
++static inline uint32_t qbman_cena_read_reg(struct qbman_swp_sys *s,
++ uint32_t offset)
++{
++ return __raw_readl(s->addr_cena + offset);
++}
++
++static inline void *qbman_cena_read(struct qbman_swp_sys *s, uint32_t offset)
++{
++ uint32_t *shadow = (uint32_t *)(s->cena + offset);
++ unsigned int loop;
++#ifdef QBMAN_CENA_TRACE
++ pr_info("qbman_cena_read(%p:%d:0x%03x) %p\n",
++ s->addr_cena, s->idx, offset, shadow);
++#endif
++
++ for (loop = 0; loop < 16; loop++)
++ shadow[loop] = __raw_readl(s->addr_cena + offset
++ + loop * 4);
++#ifdef QBMAN_CENA_TRACE
++ hexdump(shadow, 64);
++#endif
++ return shadow;
++}
++
++static inline void *qbman_cena_read_wo_shadow(struct qbman_swp_sys *s,
++ uint32_t offset)
++{
++#ifdef QBMAN_CENA_TRACE
++ pr_info("qbman_cena_read(%p:%d:0x%03x)\n",
++ s->addr_cena, s->idx, offset);
++#endif
++ return s->addr_cena + offset;
++}
++
++static inline void qbman_cena_invalidate(struct qbman_swp_sys *s,
++ uint32_t offset)
++{
++ dccivac(s->addr_cena + offset);
++}
++
++static inline void qbman_cena_invalidate_prefetch(struct qbman_swp_sys *s,
++ uint32_t offset)
++{
++ dccivac(s->addr_cena + offset);
++ prefetch_for_load(s->addr_cena + offset);
++}
++
++static inline void qbman_cena_prefetch(struct qbman_swp_sys *s,
++ uint32_t offset)
++{
++ prefetch_for_load(s->addr_cena + offset);
++}
++
++ /******************/
++ /* Portal support */
++ /******************/
++
++/* The SWP_CFG portal register is special, in that it is used by the
++ * platform-specific code rather than the platform-independent code in
++ * qbman_portal.c. So use of it is declared locally here. */
++#define QBMAN_CINH_SWP_CFG 0xd00
++
++/* For MC portal use, we always configure with
++ * DQRR_MF is (SWP_CFG,20,3) - DQRR max fill (<- 0x4)
++ * EST is (SWP_CFG,16,3) - EQCR_CI stashing threshold (<- 0x2)
++ * RPM is (SWP_CFG,12,2) - RCR production notification mode (<- 0x3)
++ * DCM is (SWP_CFG,10,2) - DQRR consumption notification mode (<- 0x2)
++ * EPM is (SWP_CFG,8,2) - EQCR production notification mode (<- 0x2)
++ * SD is (SWP_CFG,5,1) - memory stashing drop enable (<- TRUE)
++ * SP is (SWP_CFG,4,1) - memory stashing priority (<- TRUE)
++ * SE is (SWP_CFG,3,1) - memory stashing enable (<- TRUE)
++ * DP is (SWP_CFG,2,1) - dequeue stashing priority (<- TRUE)
++ * DE is (SWP_CFG,1,1) - dequeue stashing enable (<- TRUE)
++ * EP is (SWP_CFG,0,1) - EQCR_CI stashing priority (<- TRUE)
++ */
++static inline uint32_t qbman_set_swp_cfg(uint8_t max_fill, uint8_t wn,
++ uint8_t est, uint8_t rpm, uint8_t dcm,
++ uint8_t epm, int sd, int sp, int se,
++ int dp, int de, int ep)
++{
++ uint32_t reg;
++ reg = e32_uint8_t(20, (uint32_t)(3 + (max_fill >> 3)), max_fill) |
++ e32_uint8_t(16, 3, est) |
++ e32_uint8_t(12, 2, rpm) | e32_uint8_t(10, 2, dcm) |
++ e32_uint8_t(8, 2, epm) | e32_int(5, 1, sd) |
++ e32_int(4, 1, sp) | e32_int(3, 1, se) | e32_int(2, 1, dp) |
++ e32_int(1, 1, de) | e32_int(0, 1, ep) | e32_uint8_t(14, 1, wn);
++ return reg;
++}
++
++static inline int qbman_swp_sys_init(struct qbman_swp_sys *s,
++ const struct qbman_swp_desc *d,
++ uint8_t dqrr_size)
++{
++ uint32_t reg;
++ s->addr_cena = d->cena_bar;
++ s->addr_cinh = d->cinh_bar;
++ s->idx = (uint32_t)d->idx;
++ s->cena = (void *)get_zeroed_page(GFP_KERNEL);
++ if (!s->cena) {
++ pr_err("Could not allocate page for cena shadow\n");
++ return -1;
++ }
++ s->eqcr_mode = d->eqcr_mode;
++ BUG_ON(d->idx < 0);
++#ifdef QBMAN_CHECKING
++ /* We should never be asked to initialise for a portal that isn't in
++ * the power-on state. (Ie. don't forget to reset portals when they are
++ * decommissioned!)
++ */
++ reg = qbman_cinh_read(s, QBMAN_CINH_SWP_CFG);
++ BUG_ON(reg);
++#endif
++ if (s->eqcr_mode == qman_eqcr_vb_array)
++ reg = qbman_set_swp_cfg(dqrr_size, 0, 0, 3, 2, 3, 1, 1, 1, 1,
++ 1, 1);
++ else
++ reg = qbman_set_swp_cfg(dqrr_size, 0, 2, 3, 2, 2, 1, 1, 1, 1,
++ 1, 1);
++ qbman_cinh_write(s, QBMAN_CINH_SWP_CFG, reg);
++ reg = qbman_cinh_read(s, QBMAN_CINH_SWP_CFG);
++ if (!reg) {
++ pr_err("The portal %d is not enabled!\n", s->idx);
++ kfree(s->cena);
++ return -1;
++ }
++ return 0;
++}
++
++static inline void qbman_swp_sys_finish(struct qbman_swp_sys *s)
++{
++ free_page((unsigned long)s->cena);
++}
++
++static inline void *qbman_cena_write_start_wo_shadow_fast(struct qbman_swp_sys *s,
++ uint32_t offset)
++{
++ #ifdef QBMAN_CENA_TRACE
++ pr_info("qbman_cena_write_start(%p:%d:0x%03x)\n",
++ s->addr_cena, s->idx, offset);
++ #endif
++ BUG_ON(offset & 63);
++ return (s->addr_cena + offset);
++}
+diff --git a/drivers/net/dpaa2/qbman/driver/qbman_sys_decl.h b/drivers/net/dpaa2/qbman/driver/qbman_sys_decl.h
+new file mode 100644
+index 0000000..ae7ef97
+--- /dev/null
++++ b/drivers/net/dpaa2/qbman/driver/qbman_sys_decl.h
+@@ -0,0 +1,68 @@
++/* Copyright (C) 2014 Freescale Semiconductor, Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of Freescale Semiconductor nor the
++ * names of its contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++#include <compat.h>
++#include <drivers/fsl_qbman_base.h>
++
++/* Sanity check */
++#if (__BYTE_ORDER__ != __ORDER_BIG_ENDIAN__) && \
++ (__BYTE_ORDER__ != __ORDER_LITTLE_ENDIAN__)
++#error "Unknown endianness!"
++#endif
++
++/* The platform-independent code shouldn't need endianness, except for
++ * weird/fast-path cases like qbman_result_has_token(), which needs to
++ * perform a passive and endianness-specific test on a read-only data structure
++ * very quickly. It's an exception, and this symbol is used for that case. */
++#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
++#define DQRR_TOK_OFFSET 0
++#define QBMAN_RESULT_VERB_OFFSET_IN_MEM 24
++#define SCN_STATE_OFFSET_IN_MEM 8
++#define SCN_RID_OFFSET_IN_MEM 8
++#else
++#define DQRR_TOK_OFFSET 24
++#define QBMAN_RESULT_VERB_OFFSET_IN_MEM 0
++#define SCN_STATE_OFFSET_IN_MEM 16
++#define SCN_RID_OFFSET_IN_MEM 0
++#endif
++
++/* Similarly-named functions */
++#define upper32(a) upper_32_bits(a)
++#define lower32(a) lower_32_bits(a)
++
++ /****************/
++ /* arch assists */
++ /****************/
++#define dcbz(p) { asm volatile("dc zva, %0" : : "r" (p) : "memory"); }
++#define lwsync() { asm volatile("dmb st" : : : "memory"); }
++#define dcbf(p) { asm volatile("dc cvac, %0" : : "r"(p) : "memory"); }
++#define dccivac(p) { asm volatile("dc civac, %0" : : "r"(p) : "memory"); }
++static inline void prefetch_for_load(void *p)
++{
++ asm volatile("prfm pldl1keep, [%0, #64]" : : "r" (p));
++}
++static inline void prefetch_for_store(void *p)
++{
++ asm volatile("prfm pstl1keep, [%0, #64]" : : "r" (p));
++}
+diff --git a/drivers/net/dpaa2/qbman/include/compat.h b/drivers/net/dpaa2/qbman/include/compat.h
+new file mode 100644
+index 0000000..0d14b58
+--- /dev/null
++++ b/drivers/net/dpaa2/qbman/include/compat.h
+@@ -0,0 +1,597 @@
++/* Copyright (c) 2008-2011 Freescale Semiconductor, Inc.
++ * All rights reserved.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of Freescale Semiconductor nor the
++ * names of its contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++#ifndef HEADER_COMPAT_H
++#define HEADER_COMPAT_H
++
++#include <sched.h>
++
++#ifndef _GNU_SOURCE
++#define _GNU_SOURCE
++#endif
++#include <stdint.h>
++#include <stdlib.h>
++#include <stddef.h>
++#include <errno.h>
++#include <string.h>
++#include <pthread.h>
++#include <net/ethernet.h>
++#include <stdio.h>
++#include <stdbool.h>
++#include <ctype.h>
++#include <malloc.h>
++#include <sys/types.h>
++#include <sys/stat.h>
++#include <fcntl.h>
++#include <unistd.h>
++#include <sys/mman.h>
++#include <limits.h>
++#include <assert.h>
++#include <dirent.h>
++#include <inttypes.h>
++#include <error.h>
++
++/* The following definitions are primarily to allow the single-source driver
++ * interfaces to be included by arbitrary program code. Ie. for interfaces that
++ * are also available in kernel-space, these definitions provide compatibility
++ * with certain attributes and types used in those interfaces. */
++
++/* Required compiler attributes */
++#define __maybe_unused __attribute__((unused))
++#define __always_unused __attribute__((unused))
++#define __packed __attribute__((__packed__))
++#define __user
++#define likely(x) __builtin_expect(!!(x), 1)
++#define unlikely(x) __builtin_expect(!!(x), 0)
++#define ____cacheline_aligned __attribute__((aligned(L1_CACHE_BYTES)))
++#define container_of(p, t, f) (t *)((void *)p - offsetof(t, f))
++#define __stringify_1(x) #x
++#define __stringify(x) __stringify_1(x)
++#define panic(x) \
++do { \
++ printf("panic: %s", x); \
++ abort(); \
++} while (0)
++
++#ifdef ARRAY_SIZE
++#undef ARRAY_SIZE
++#endif
++#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))
++
++/* Required types */
++typedef uint8_t u8;
++typedef uint16_t u16;
++typedef uint32_t u32;
++typedef uint64_t u64;
++typedef uint64_t dma_addr_t;
++typedef cpu_set_t cpumask_t;
++#define spinlock_t pthread_mutex_t
++typedef u32 compat_uptr_t;
++static inline void __user *compat_ptr(compat_uptr_t uptr)
++{
++ return (void __user *)(unsigned long)uptr;
++}
++
++static inline compat_uptr_t ptr_to_compat(void __user *uptr)
++{
++ return (u32)(unsigned long)uptr;
++}
++
++/* I/O operations */
++static inline u32 in_be32(volatile void *__p)
++{
++ volatile u32 *p = __p;
++ return *p;
++}
++static inline void out_be32(volatile void *__p, u32 val)
++{
++ volatile u32 *p = __p;
++ *p = val;
++}
++
++/* Debugging */
++#define prflush(fmt, args...) \
++ do { \
++ printf(fmt, ##args); \
++ fflush(stdout); \
++ } while (0)
++#define pr_crit(fmt, args...) prflush("CRIT:" fmt, ##args)
++#define pr_err(fmt, args...) prflush("ERR:" fmt, ##args)
++#define pr_warning(fmt, args...) prflush("WARN:" fmt, ##args)
++#define pr_info(fmt, args...) prflush(fmt, ##args)
++
++#define BUG() abort()
++#ifdef CONFIG_BUGON
++#ifdef pr_debug
++#undef pr_debug
++#endif
++#define pr_debug(fmt, args...) printf(fmt, ##args)
++#define BUG_ON(c) \
++do { \
++ if (c) { \
++ pr_crit("BUG: %s:%d\n", __FILE__, __LINE__); \
++ abort(); \
++ } \
++} while(0)
++#define might_sleep_if(c) BUG_ON(c)
++#define msleep(x) \
++do { \
++ pr_crit("BUG: illegal call %s:%d\n", __FILE__, __LINE__); \
++ exit(EXIT_FAILURE); \
++} while(0)
++#else
++#ifdef pr_debug
++#undef pr_debug
++#endif
++#define pr_debug(fmt, args...) do { ; } while(0)
++#define BUG_ON(c) do { ; } while(0)
++#define might_sleep_if(c) do { ; } while(0)
++#define msleep(x) do { ; } while(0)
++#endif
++#define WARN_ON(c, str) \
++do { \
++ static int warned_##__LINE__; \
++ if ((c) && !warned_##__LINE__) { \
++ pr_warning("%s\n", str); \
++ pr_warning("(%s:%d)\n", __FILE__, __LINE__); \
++ warned_##__LINE__ = 1; \
++ } \
++} while (0)
++
++#define ALIGN(x, a) (((x) + ((typeof(x))(a) - 1)) & ~((typeof(x))(a) - 1))
++
++/****************/
++/* Linked-lists */
++/****************/
++
++struct list_head {
++ struct list_head *prev;
++ struct list_head *next;
++};
++
++#define LIST_HEAD(n) \
++struct list_head n = { \
++ .prev = &n, \
++ .next = &n \
++}
++#define INIT_LIST_HEAD(p) \
++do { \
++ struct list_head *__p298 = (p); \
++ __p298->prev = __p298->next =__p298; \
++} while(0)
++#define list_entry(node, type, member) \
++ (type *)((void *)node - offsetof(type, member))
++#define list_empty(p) \
++({ \
++ const struct list_head *__p298 = (p); \
++ ((__p298->next == __p298) && (__p298->prev == __p298)); \
++})
++#define list_add(p,l) \
++do { \
++ struct list_head *__p298 = (p); \
++ struct list_head *__l298 = (l); \
++ __p298->next = __l298->next; \
++ __p298->prev = __l298; \
++ __l298->next->prev = __p298; \
++ __l298->next = __p298; \
++} while(0)
++#define list_add_tail(p,l) \
++do { \
++ struct list_head *__p298 = (p); \
++ struct list_head *__l298 = (l); \
++ __p298->prev = __l298->prev; \
++ __p298->next = __l298; \
++ __l298->prev->next = __p298; \
++ __l298->prev = __p298; \
++} while(0)
++#define list_for_each(i, l) \
++ for (i = (l)->next; i != (l); i = i->next)
++#define list_for_each_safe(i, j, l) \
++ for (i = (l)->next, j = i->next; i != (l); \
++ i = j, j = i->next)
++#define list_for_each_entry(i, l, name) \
++ for (i = list_entry((l)->next, typeof(*i), name); &i->name != (l); \
++ i = list_entry(i->name.next, typeof(*i), name))
++#define list_for_each_entry_safe(i, j, l, name) \
++ for (i = list_entry((l)->next, typeof(*i), name), \
++ j = list_entry(i->name.next, typeof(*j), name); \
++ &i->name != (l); \
++ i = j, j = list_entry(j->name.next, typeof(*j), name))
++#define list_del(i) \
++do { \
++ (i)->next->prev = (i)->prev; \
++ (i)->prev->next = (i)->next; \
++} while(0)
++
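++/* Illustrative usage sketch for the list helpers above (not part of the
++ * driver; 'struct foo', 'foo_list' and the field names are hypothetical):
++ *
++ *	struct foo {
++ *		int id;
++ *		struct list_head node;
++ *	};
++ *	LIST_HEAD(foo_list);
++ *
++ *	struct foo *f = malloc(sizeof(*f)), *i, *tmp;
++ *
++ *	f->id = 1;
++ *	list_add_tail(&f->node, &foo_list);
++ *	list_for_each_entry(i, &foo_list, node)
++ *		printf("id %d\n", i->id);
++ *	list_for_each_entry_safe(i, tmp, &foo_list, node) {
++ *		list_del(&i->node);
++ *		free(i);
++ *	}
++ */
++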
++/* Other miscellaneous interfaces our APIs depend on; */
++
++#define lower_32_bits(x) ((u32)(x))
++#define upper_32_bits(x) ((u32)(((x) >> 16) >> 16))
++
++/* Compiler/type stuff */
++typedef unsigned int gfp_t;
++typedef uint32_t phandle;
++
++#define noinline __attribute__((noinline))
++#define __iomem
++#define EINTR 4
++#define ENODEV 19
++#define MODULE_AUTHOR(s)
++#define MODULE_LICENSE(s)
++#define MODULE_DESCRIPTION(s)
++#define MODULE_PARM_DESC(x, y)
++#define EXPORT_SYMBOL(x)
++#define module_init(fn) int m_##fn(void) { return fn(); }
++#define module_exit(fn) void m_##fn(void) { fn(); }
++#define module_param(x, y, z)
++#define module_param_string(w, x, y, z)
++#define GFP_KERNEL 0
++#define __KERNEL__
++#define __init
++#define __raw_readb(p) *(const volatile unsigned char *)(p)
++#define __raw_readl(p) *(const volatile unsigned int *)(p)
++#define __raw_writel(v, p) \
++do { \
++ *(volatile unsigned int *)(p) = (v); \
++} while (0)
++
++/* printk() stuff */
++#define printk(fmt, args...) do_not_use_printk
++#define nada(fmt, args...) do { ; } while(0)
++
++/* Interrupt stuff */
++typedef uint32_t irqreturn_t;
++#define IRQ_HANDLED 0
++
++/* memcpy() stuff - when you know alignments in advance */
++#ifdef CONFIG_TRY_BETTER_MEMCPY
++static inline void copy_words(void *dest, const void *src, size_t sz)
++{
++ u32 *__dest = dest;
++ const u32 *__src = src;
++ size_t __sz = sz >> 2;
++ BUG_ON((unsigned long)dest & 0x3);
++ BUG_ON((unsigned long)src & 0x3);
++ BUG_ON(sz & 0x3);
++ while (__sz--)
++ *(__dest++) = *(__src++);
++}
++static inline void copy_shorts(void *dest, const void *src, size_t sz)
++{
++ u16 *__dest = dest;
++ const u16 *__src = src;
++ size_t __sz = sz >> 1;
++ BUG_ON((unsigned long)dest & 0x1);
++ BUG_ON((unsigned long)src & 0x1);
++ BUG_ON(sz & 0x1);
++ while (__sz--)
++ *(__dest++) = *(__src++);
++}
++static inline void copy_bytes(void *dest, const void *src, size_t sz)
++{
++ u8 *__dest = dest;
++ const u8 *__src = src;
++ while (sz--)
++ *(__dest++) = *(__src++);
++}
++#else
++#define copy_words memcpy
++#define copy_shorts memcpy
++#define copy_bytes memcpy
++#endif
++
++/* Spinlock stuff */
++#define spinlock_t pthread_mutex_t
++#define __SPIN_LOCK_UNLOCKED(x) PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP
++#define DEFINE_SPINLOCK(x) spinlock_t x = __SPIN_LOCK_UNLOCKED(x)
++#define spin_lock_init(x) \
++ do { \
++ __maybe_unused int __foo; \
++ pthread_mutexattr_t __foo_attr; \
++ __foo = pthread_mutexattr_init(&__foo_attr); \
++ BUG_ON(__foo); \
++ __foo = pthread_mutexattr_settype(&__foo_attr, \
++ PTHREAD_MUTEX_ADAPTIVE_NP); \
++ BUG_ON(__foo); \
++ __foo = pthread_mutex_init(x, &__foo_attr); \
++ BUG_ON(__foo); \
++ } while (0)
++#define spin_lock(x) \
++ do { \
++ __maybe_unused int __foo = pthread_mutex_lock(x); \
++ BUG_ON(__foo); \
++ } while (0)
++#define spin_unlock(x) \
++ do { \
++ __maybe_unused int __foo = pthread_mutex_unlock(x); \
++ BUG_ON(__foo); \
++ } while (0)
++#define spin_lock_irq(x) do { \
++ local_irq_disable(); \
++ spin_lock(x); \
++ } while (0)
++#define spin_unlock_irq(x) do { \
++ spin_unlock(x); \
++ local_irq_enable(); \
++ } while (0)
++#define spin_lock_irqsave(x, f) do { spin_lock_irq(x); } while (0)
++#define spin_unlock_irqrestore(x, f) do { spin_unlock_irq(x); } while (0)
++
++#define raw_spinlock_t spinlock_t
++#define raw_spin_lock_init(x) spin_lock_init(x)
++#define raw_spin_lock_irqsave(x, f) spin_lock(x)
++#define raw_spin_unlock_irqrestore(x, f) spin_unlock(x)
++
++/* Completion stuff */
++#define DECLARE_COMPLETION(n) int n = 0;
++#define complete(n) \
++do { \
++ *n = 1; \
++} while(0)
++#define wait_for_completion(n) \
++do { \
++ while (!*n) { \
++ bman_poll(); \
++ qman_poll(); \
++ } \
++ *n = 0; \
++} while(0)
++
++/* Platform device stuff */
++struct platform_device { void *dev; };
++static inline struct
++platform_device *platform_device_alloc(const char *name __always_unused,
++ int id __always_unused)
++{
++ struct platform_device *ret = malloc(sizeof(*ret));
++ if (ret)
++ ret->dev = NULL;
++ return ret;
++}
++#define platform_device_add(pdev) 0
++#define platform_device_del(pdev) do { ; } while(0)
++static inline void platform_device_put(struct platform_device *pdev)
++{
++ free(pdev);
++}
++struct resource {
++ int unused;
++};
++
++/* Allocator stuff */
++#define kmalloc(sz, t) malloc(sz)
++#define vmalloc(sz) malloc(sz)
++#define kfree(p) do { if (p) free(p); } while (0)
++static inline void *kzalloc(size_t sz, gfp_t __foo __always_unused)
++{
++ void *ptr = malloc(sz);
++ if (ptr)
++ memset(ptr, 0, sz);
++ return ptr;
++}
++static inline unsigned long get_zeroed_page(gfp_t __foo __always_unused)
++{
++ void *p;
++ if (posix_memalign(&p, 4096, 4096))
++ return 0;
++ memset(p, 0, 4096);
++ return (unsigned long)p;
++}
++static inline void free_page(unsigned long p)
++{
++ free((void *)p);
++}
++struct kmem_cache {
++ size_t sz;
++ size_t align;
++};
++#define SLAB_HWCACHE_ALIGN 0
++static inline struct kmem_cache *kmem_cache_create(const char *n __always_unused,
++ size_t sz, size_t align, unsigned long flags __always_unused,
++ void (*c)(void *) __always_unused)
++{
++ struct kmem_cache *ret = malloc(sizeof(*ret));
++ if (ret) {
++ ret->sz = sz;
++ ret->align = align;
++ }
++ return ret;
++}
++static inline void kmem_cache_destroy(struct kmem_cache *c)
++{
++ free(c);
++}
++static inline void *kmem_cache_alloc(struct kmem_cache *c, gfp_t f __always_unused)
++{
++ void *p;
++ if (posix_memalign(&p, c->align, c->sz))
++ return NULL;
++ return p;
++}
++static inline void kmem_cache_free(struct kmem_cache *c __always_unused, void *p)
++{
++ free(p);
++}
++static inline void *kmem_cache_zalloc(struct kmem_cache *c, gfp_t f)
++{
++ void *ret = kmem_cache_alloc(c, f);
++ if (ret)
++ memset(ret, 0, c->sz);
++ return ret;
++}
++
++/* Bitfield stuff. */
++#define BITS_PER_ULONG (sizeof(unsigned long) << 3)
++#define SHIFT_PER_ULONG (((1 << 5) == BITS_PER_ULONG) ? 5 : 6)
++#define BITS_MASK(idx) ((unsigned long)1 << ((idx) & (BITS_PER_ULONG - 1)))
++#define BITS_IDX(idx) ((idx) >> SHIFT_PER_ULONG)
++static inline unsigned long test_bits(unsigned long mask,
++ volatile unsigned long *p)
++{
++ return *p & mask;
++}
++static inline int test_bit(int idx, volatile unsigned long *bits)
++{
++ return test_bits(BITS_MASK(idx), bits + BITS_IDX(idx));
++}
++static inline void set_bits(unsigned long mask, volatile unsigned long *p)
++{
++ *p |= mask;
++}
++static inline void set_bit(int idx, volatile unsigned long *bits)
++{
++ set_bits(BITS_MASK(idx), bits + BITS_IDX(idx));
++}
++static inline void clear_bits(unsigned long mask, volatile unsigned long *p)
++{
++ *p &= ~mask;
++}
++static inline void clear_bit(int idx, volatile unsigned long *bits)
++{
++ clear_bits(BITS_MASK(idx), bits + BITS_IDX(idx));
++}
++static inline unsigned long test_and_set_bits(unsigned long mask,
++ volatile unsigned long *p)
++{
++ unsigned long ret = test_bits(mask, p);
++ set_bits(mask, p);
++ return ret;
++}
++static inline int test_and_set_bit(int idx, volatile unsigned long *bits)
++{
++ int ret = test_bit(idx, bits);
++ set_bit(idx, bits);
++ return ret;
++}
++static inline int test_and_clear_bit(int idx, volatile unsigned long *bits)
++{
++ int ret = test_bit(idx, bits);
++ clear_bit(idx, bits);
++ return ret;
++}
++static inline int find_next_zero_bit(unsigned long *bits, int limit, int idx)
++{
++ while ((++idx < limit) && test_bit(idx, bits))
++ ;
++ return idx;
++}
++static inline int find_first_zero_bit(unsigned long *bits, int limit)
++{
++ int idx = 0;
++ while (test_bit(idx, bits) && (++idx < limit))
++ ;
++ return idx;
++}
++
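++/* Illustrative sketch of how the bit helpers above are commonly combined into
++ * a small index allocator (the 'used' bitmap and the limit of 64 are
++ * hypothetical, not part of the driver):
++ *
++ *	static unsigned long used[64 / BITS_PER_ULONG + 1];
++ *
++ *	int alloc_idx(void)
++ *	{
++ *		int idx = find_first_zero_bit(used, 64);
++ *
++ *		if (idx >= 64)
++ *			return -1;
++ *		set_bit(idx, used);
++ *		return idx;
++ *	}
++ *
++ *	void free_idx(int idx)
++ *	{
++ *		clear_bit(idx, used);
++ *	}
++ */
++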
++static inline u64 div64_u64(u64 n, u64 d)
++{
++ return n / d;
++}
++
++#define dmb(opt) { asm volatile("dmb " #opt : : : "memory"); }
++#define smp_mb() dmb(ish)
++
++/* Atomic stuff */
++typedef struct {
++ int counter;
++} atomic_t;
++
++#define atomic_read(v) (*(volatile int *)&(v)->counter)
++#define atomic_set(v, i) (((v)->counter) = (i))
++static inline void atomic_add(int i, atomic_t *v)
++{
++ unsigned long tmp;
++ int result;
++
++ asm volatile("// atomic_add\n"
++ "1: ldxr %w0, %2\n"
++ " add %w0, %w0, %w3\n"
++ " stxr %w1, %w0, %2\n"
++ " cbnz %w1, 1b"
++ : "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
++ : "Ir" (i));
++}
++
++static inline int atomic_add_return(int i, atomic_t *v)
++{
++ unsigned long tmp;
++ int result;
++
++ asm volatile("// atomic_add_return\n"
++ "1: ldxr %w0, %2\n"
++ " add %w0, %w0, %w3\n"
++ " stlxr %w1, %w0, %2\n"
++ " cbnz %w1, 1b"
++ : "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
++ : "Ir" (i)
++ : "memory");
++
++ smp_mb();
++ return result;
++}
++
++static inline void atomic_sub(int i, atomic_t *v)
++{
++ unsigned long tmp;
++ int result;
++
++ asm volatile("// atomic_sub\n"
++ "1: ldxr %w0, %2\n"
++ " sub %w0, %w0, %w3\n"
++ " stxr %w1, %w0, %2\n"
++ " cbnz %w1, 1b"
++ : "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
++ : "Ir" (i));
++}
++
++static inline int atomic_sub_return(int i, atomic_t *v)
++{
++ unsigned long tmp;
++ int result;
++
++ asm volatile("// atomic_sub_return\n"
++ "1: ldxr %w0, %2\n"
++ " sub %w0, %w0, %w3\n"
++ " stlxr %w1, %w0, %2\n"
++ " cbnz %w1, 1b"
++ : "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
++ : "Ir" (i)
++ : "memory");
++
++ smp_mb();
++ return result;
++}
++
++#define atomic_inc(v) atomic_add(1, v)
++#define atomic_dec(v) atomic_sub(1, v)
++
++#define atomic_inc_and_test(v) (atomic_add_return(1, v) == 0)
++#define atomic_dec_and_test(v) (atomic_sub_return(1, v) == 0)
++#define atomic_inc_return(v) (atomic_add_return(1, v))
++#define atomic_dec_return(v) (atomic_sub_return(1, v))
++#define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)
++
++#endif /* HEADER_COMPAT_H */
+diff --git a/drivers/net/dpaa2/qbman/include/drivers/fsl_qbman_base.h b/drivers/net/dpaa2/qbman/include/drivers/fsl_qbman_base.h
+new file mode 100644
+index 0000000..4cb784c
+--- /dev/null
++++ b/drivers/net/dpaa2/qbman/include/drivers/fsl_qbman_base.h
+@@ -0,0 +1,151 @@
++/* Copyright (C) 2014 Freescale Semiconductor, Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of Freescale Semiconductor nor the
++ * names of its contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++#ifndef _FSL_QBMAN_BASE_H
++#define _FSL_QBMAN_BASE_H
++
++/**
++ * DOC: QBMan basic structures
++ *
++ * The QBMan block descriptor, software portal descriptor and Frame descriptor
++ * are defined here.
++ *
++ */
++
++/**
++ * struct qbman_block_desc - qbman block descriptor structure
++ * @ccsr_reg_bar: CCSR register map.
++ * @irq_rerr: Recoverable error interrupt line.
++ * @irq_nrerr: Non-recoverable error interrupt line
++ *
++ * Descriptor for a QBMan instance on the SoC. On partitions/targets that do not
++ * control this QBMan instance, these values may simply be place-holders. The
++ * idea is simply that we be able to distinguish between them, eg. so that SWP
++ * descriptors can identify which QBMan instance they belong to.
++ */
++struct qbman_block_desc {
++ void *ccsr_reg_bar;
++ int irq_rerr;
++ int irq_nrerr;
++};
++
++enum qbman_eqcr_mode {
++ qman_eqcr_vb_ring = 2, /* Valid bit, with eqcr in ring mode */
++ qman_eqcr_vb_array, /* Valid bit, with eqcr in array mode */
++};
++
++/**
++ * struct qbman_swp_desc - qbman software portal descriptor structure
++ * @block: The QBMan instance.
++ * @cena_bar: Cache-enabled portal register map.
++ * @cinh_bar: Cache-inhibited portal register map.
++ * @irq: -1 if unused (or unassigned)
++ * @idx: SWPs within a QBMan are indexed. -1 if opaque to the user.
++ * @qman_version: the qman version.
++ * @eqcr_mode: Select the eqcr mode, currently only valid bit ring mode and
++ * valid bit array mode are supported.
++ *
++ * Descriptor for a QBMan software portal, expressed in terms that make sense to
++ * the user context. Ie. on MC, this information is likely to be true-physical,
++ * and instantiated statically at compile-time. On GPP, this information is
++ * likely to be obtained via "discovery" over a partition's "MC bus"
++ * (ie. in response to a MC portal command), and would take into account any
++ * virtualisation of the GPP user's address space and/or interrupt numbering.
++ */
++struct qbman_swp_desc {
++ const struct qbman_block_desc *block;
++ uint8_t *cena_bar;
++ uint8_t *cinh_bar;
++ int irq;
++ int idx;
++ uint32_t qman_version;
++ enum qbman_eqcr_mode eqcr_mode;
++};
++
++/* Driver object for managing a QBMan portal */
++struct qbman_swp;
++
++/**
++ * struct qbman_fd - basic structure for qbman frame descriptor
++ * @words: for easier/faster copying the whole FD structure.
++ * @addr_lo: the lower 32 bits of the address in FD.
++ * @addr_hi: the upper 32 bits of the address in FD.
++ * @len: the length field in FD.
++ * @bpid_offset: represent the bpid and offset fields in FD. offset in
++ * the MS 16 bits, BPID in the LS 16 bits.
++ * @frc: frame context
++ * @ctrl: the 32bit control bits including dd, sc,... va, err.
++ * @flc_lo: the lower 32bit of flow context.
++ * @flc_hi: the upper 32bits of flow context.
++ *
++ * Place-holder for FDs, we represent it via the simplest form that we need for
++ * now. Different overlays may be needed to support different options, etc. (It
++ * is impractical to define One True Struct, because the resulting encoding
++ * routines (lots of read-modify-writes) would be worst-case performance whether
++ * or not circumstances required them.)
++ *
++ * Note, as with all data-structures exchanged between software and hardware (be
++ * they located in the portal register map or DMA'd to and from main-memory),
++ * the driver ensures that the caller of the driver API sees the data-structures
++ * in host-endianness. "struct qbman_fd" is no exception. The 32-bit words
++ * contained within this structure are represented in host-endianness, even if
++ * hardware always treats them as little-endian. As such, if any of these fields
++ * are interpreted in a binary (rather than numerical) fashion by hardware
++ * blocks (eg. accelerators), then the user should be careful. We illustrate
++ * with an example;
++ *
++ * Suppose the desired behaviour of an accelerator is controlled by the "frc"
++ * field of the FDs that are sent to it. Suppose also that the behaviour desired
++ * by the user corresponds to an "frc" value which is expressed as the literal
++ * sequence of bytes 0xfe, 0xed, 0xab, and 0xba. So "frc" should be the 32-bit
++ * value in which 0xfe is the first byte and 0xba is the last byte, and as
++ * hardware is little-endian, this amounts to a 32-bit "value" of 0xbaabedfe. If
++ * the software is little-endian also, this can simply be achieved by setting
++ * frc=0xbaabedfe. On the other hand, if software is big-endian, it should set
++ * frc=0xfeedabba! The best way of avoiding trouble with this sort of thing is
++ * to treat the 32-bit words as numerical values, in which the offset of a field
++ * from the beginning of the first byte (as required or generated by hardware)
++ * is numerically encoded by a left-shift (ie. by raising the field to a
++ * corresponding power of 2). Ie. in the current example, software could set
++ * "frc" in the following way, and it would work correctly on both little-endian
++ * and big-endian operation;
++ * fd.frc = (0xfe << 0) | (0xed << 8) | (0xab << 16) | (0xba << 24);
++ */
++struct qbman_fd {
++ union {
++ uint32_t words[8];
++ struct qbman_fd_simple {
++ uint32_t addr_lo;
++ uint32_t addr_hi;
++ uint32_t len;
++ uint32_t bpid_offset;
++ uint32_t frc;
++ uint32_t ctrl;
++ uint32_t flc_lo;
++ uint32_t flc_hi;
++ } simple;
++ };
++};
++
++#endif /* !_FSL_QBMAN_BASE_H */
+diff --git a/drivers/net/dpaa2/qbman/include/drivers/fsl_qbman_portal.h b/drivers/net/dpaa2/qbman/include/drivers/fsl_qbman_portal.h
+new file mode 100644
+index 0000000..ddcabcf
+--- /dev/null
++++ b/drivers/net/dpaa2/qbman/include/drivers/fsl_qbman_portal.h
+@@ -0,0 +1,1089 @@
++/* Copyright (C) 2014 Freescale Semiconductor, Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of Freescale Semiconductor nor the
++ * names of its contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++#ifndef _FSL_QBMAN_PORTAL_H
++#define _FSL_QBMAN_PORTAL_H
++
++#include <drivers/fsl_qbman_base.h>
++
++/**
++ * DOC: QBMan portal APIs to implement the following functions:
++ * - Initialize and destroy Software portal object.
++ * - Read and write Software portal interrupt registers.
++ * - Enqueue, including setting the enqueue descriptor, and issuing enqueue
++ * command etc.
++ * - Dequeue, including setting the dequeue descriptor, issuing dequeue command,
++ * parsing the dequeue response in DQRR and memory, parsing the state change
++ * notifications etc.
++ * - Release, including setting the release descriptor, and issuing the buffer
++ * release command.
++ * - Acquire, acquire the buffer from the given buffer pool.
++ * - FQ management.
++ * - Channel management, enable/disable CDAN with or without context.
++ */
++
++/**
++ * qbman_swp_init() - Create a functional object representing the given
++ * QBMan portal descriptor.
++ * @d: the given qbman swp descriptor
++ *
++ * Return qbman_swp portal object for success, NULL if the object cannot
++ * be created.
++ */
++struct qbman_swp *qbman_swp_init(const struct qbman_swp_desc *d);
++
++/**
++ * qbman_swp_finish() - Destroy the functional object representing the given
++ * QBMan portal descriptor.
++ * @p: the qbman_swp object to be destroyed.
++ *
++ */
++void qbman_swp_finish(struct qbman_swp *p);
++
++/**
++ * qbman_swp_get_desc() - Get the descriptor of the given portal object.
++ * @p: the given portal object.
++ *
++ * Return the descriptor for this portal.
++ */
++const struct qbman_swp_desc *qbman_swp_get_desc(struct qbman_swp *);
++
++ /**************/
++ /* Interrupts */
++ /**************/
++
++/* EQCR ring interrupt */
++#define QBMAN_SWP_INTERRUPT_EQRI ((uint32_t)0x00000001)
++/* Enqueue command dispatched interrupt */
++#define QBMAN_SWP_INTERRUPT_EQDI ((uint32_t)0x00000002)
++/* DQRR non-empty interrupt */
++#define QBMAN_SWP_INTERRUPT_DQRI ((uint32_t)0x00000004)
++/* RCR ring interrupt */
++#define QBMAN_SWP_INTERRUPT_RCRI ((uint32_t)0x00000008)
++/* Release command dispatched interrupt */
++#define QBMAN_SWP_INTERRUPT_RCDI ((uint32_t)0x00000010)
++/* Volatile dequeue command interrupt */
++#define QBMAN_SWP_INTERRUPT_VDCI ((uint32_t)0x00000020)
++
++/**
++ * qbman_swp_interrupt_get_vanish() - Get the data in software portal
++ * interrupt status disable register.
++ * @p: the given software portal object.
++ *
++ * Return the settings in SWP_ISDR register.
++ */
++uint32_t qbman_swp_interrupt_get_vanish(struct qbman_swp *p);
++
++/**
++ * qbman_swp_interrupt_set_vanish() - Set the data in software portal
++ * interrupt status disable register.
++ * @p: the given software portal object.
++ * @mask: The value to set in SWP_ISDR register.
++ */
++void qbman_swp_interrupt_set_vanish(struct qbman_swp *p, uint32_t mask);
++
++/**
++ * qbman_swp_interrupt_read_status() - Get the data in software portal
++ * interrupt status register.
++ * @p: the given software portal object.
++ *
++ * Return the settings in SWP_ISR register.
++ */
++uint32_t qbman_swp_interrupt_read_status(struct qbman_swp *p);
++
++/**
++ * qbman_swp_interrupt_clear_status() - Set the data in software portal
++ * interrupt status register.
++ * @p: the given software portal object.
++ * @mask: The value to set in SWP_ISR register.
++ */
++void qbman_swp_interrupt_clear_status(struct qbman_swp *p, uint32_t mask);
++
++/**
++ * qbman_swp_interrupt_get_trigger() - Get the data in software portal
++ * interrupt enable register.
++ * @p: the given software portal object.
++ *
++ * Return the settings in SWP_IER register.
++ */
++uint32_t qbman_swp_interrupt_get_trigger(struct qbman_swp *p);
++
++/**
++ * qbman_swp_interrupt_set_trigger() - Set the data in software portal
++ * interrupt enable register.
++ * @p: the given software portal object.
++ * @mask: The value to set in SWP_IER register.
++ */
++void qbman_swp_interrupt_set_trigger(struct qbman_swp *p, uint32_t mask);
++
++/**
++ * qbman_swp_interrupt_get_inhibit() - Get the data in software portal
++ * interrupt inhibit register.
++ * @p: the given software portal object.
++ *
++ * Return the settings in SWP_IIR register.
++ */
++int qbman_swp_interrupt_get_inhibit(struct qbman_swp *p);
++
++/**
++ * qbman_swp_interrupt_set_inhibit() - Set the data in software portal
++ * interrupt inhibit register.
++ * @p: the given software portal object.
++ * @inhibit: The value to set in SWP_IIR register.
++ */
++void qbman_swp_interrupt_set_inhibit(struct qbman_swp *p, int inhibit);
++
++ /************/
++ /* Dequeues */
++ /************/
++
++/**
++ * struct qbman_result - structure for qbman dequeue response and/or
++ * notification.
++ * @dont_manipulate_directly: the 16 32bit data to represent the whole
++ * possible qbman dequeue result.
++ */
++struct qbman_result {
++ uint32_t dont_manipulate_directly[16];
++};
++
++/* TODO:
++ * A DQRI interrupt can be generated when there are dequeue results on the
++ * portal's DQRR (this mechanism does not deal with "pull" dequeues to
++ * user-supplied 'storage' addresses). There are two parameters to this
++ * interrupt source, one is a threshold and the other is a timeout. The
++ * interrupt will fire if either the fill-level of the ring exceeds 'thresh',
++ * or if the ring has been non-empty for longer than 'timeout' nanoseconds.
++ * For timeout, an approximation to the desired nanosecond-granularity value is
++ * made, so there are get and set APIs to allow the user to see what actual
++ * timeout is set (compared to the timeout that was requested). */
++int qbman_swp_dequeue_thresh(struct qbman_swp *s, unsigned int thresh);
++int qbman_swp_dequeue_set_timeout(struct qbman_swp *s, unsigned int timeout);
++int qbman_swp_dequeue_get_timeout(struct qbman_swp *s, unsigned int *timeout);
++
++
++/* ------------------- */
++/* Push-mode dequeuing */
++/* ------------------- */
++
++/* The user of a portal can enable and disable push-mode dequeuing of up to 16
++ * channels independently. It does not specify this toggling by channel IDs, but
++ * rather by specifying the index (from 0 to 15) that has been mapped to the
++ * desired channel.
++ */
++
++/**
++ * qbman_swp_push_get() - Get the push dequeue setup.
++ * @s: the software portal object.
++ * @channel_idx: the channel index to query.
++ * @enabled: returned boolean to show whether the push dequeue is enabled for
++ * the given channel.
++ */
++void qbman_swp_push_get(struct qbman_swp *s, uint8_t channel_idx, int *enabled);
++
++/**
++ * qbman_swp_push_set() - Enable or disable push dequeue.
++ * @s: the software portal object.
++ * @channel_idx: the channel index.
++ * @enable: enable or disable push dequeue.
++ *
++ * The user of a portal can enable and disable push-mode dequeuing of up to 16
++ * channels independently. It does not specify this toggling by channel IDs, but
++ * rather by specifying the index (from 0 to 15) that has been mapped to the
++ * desired channel.
++ */
++void qbman_swp_push_set(struct qbman_swp *s, uint8_t channel_idx, int enable);
++
++/* ------------------- */
++/* Pull-mode dequeuing */
++/* ------------------- */
++
++/**
++ * struct qbman_pull_desc - the structure for pull dequeue descriptor
++ * @dont_manipulate_directly: the 6 32bit data to represent the whole
++ * possible settings for pull dequeue descriptor.
++ */
++struct qbman_pull_desc {
++ uint32_t dont_manipulate_directly[6];
++};
++
++enum qbman_pull_type_e {
++ /* dequeue with priority precedence, respect intra-class scheduling */
++ qbman_pull_type_prio = 1,
++ /* dequeue with active FQ precedence, respect ICS */
++ qbman_pull_type_active,
++ /* dequeue with active FQ precedence, no ICS */
++ qbman_pull_type_active_noics
++};
++
++/**
++ * qbman_pull_desc_clear() - Clear the contents of a descriptor to
++ * default/starting state.
++ * @d: the pull dequeue descriptor to be cleared.
++ */
++void qbman_pull_desc_clear(struct qbman_pull_desc *d);
++
++/**
++ * qbman_pull_desc_set_storage()- Set the pull dequeue storage
++ * @d: the pull dequeue descriptor to be set.
++ * @storage: the pointer of the memory to store the dequeue result.
++ * @storage_phys: the physical address of the storage memory.
++ * @stash: to indicate whether write allocate is enabled.
++ *
++ * If not called, or if called with 'storage' as NULL, the resulting pull
++ * dequeues will produce results to DQRR. If 'storage' is non-NULL, then results
++ * are produced to the given memory location (using the physical/DMA address
++ * which the caller provides in 'storage_phys'), and 'stash' controls whether or
++ * not those writes to main-memory express a cache-warming attribute.
++ */
++void qbman_pull_desc_set_storage(struct qbman_pull_desc *d,
++				 struct qbman_result *storage,
++				 dma_addr_t storage_phys,
++				 int stash);
++/**
++ * qbman_pull_desc_set_numframes() - Set the number of frames to be dequeued.
++ * @d: the pull dequeue descriptor to be set.
++ * @numframes: number of frames to be set, must be between 1 and 16, inclusive.
++ */
++void qbman_pull_desc_set_numframes(struct qbman_pull_desc *d,
++ uint8_t numframes);
++/**
++ * qbman_pull_desc_set_token() - Set dequeue token for pull command
++ * @d: the dequeue descriptor
++ * @token: the token to be set
++ *
++ * token is the value that shows up in the dequeue response that can be used to
++ * detect when the results have been published. The easiest technique is to zero
++ * result "storage" before issuing a dequeue, and use any non-zero 'token' value
++ */
++void qbman_pull_desc_set_token(struct qbman_pull_desc *d, uint8_t token);
++
++/* Exactly one of the following descriptor "actions" should be set. (Calling any
++ * one of these will replace the effect of any prior call to one of these.)
++ * - pull dequeue from the given frame queue (FQ)
++ * - pull dequeue from any FQ in the given work queue (WQ)
++ * - pull dequeue from any FQ in any WQ in the given channel
++ */
++/**
++ * qbman_pull_desc_set_fq() - Set fqid from which the dequeue command dequeues.
++ * @fqid: the frame queue index of the given FQ.
++ */
++void qbman_pull_desc_set_fq(struct qbman_pull_desc *d, uint32_t fqid);
++
++/**
++ * qbman_pull_desc_set_wq() - Set wqid from which the dequeue command dequeues.
++ * @wqid: composed of channel id and wqid within the channel.
++ * @dct: the dequeue command type.
++ */
++void qbman_pull_desc_set_wq(struct qbman_pull_desc *d, uint32_t wqid,
++ enum qbman_pull_type_e dct);
++
++/**
++ * qbman_pull_desc_set_channel() - Set channelid from which the dequeue command
++ * dequeues.
++ * @chid: the channel id to be dequeued.
++ * @dct: the dequeue command type.
++ */
++void qbman_pull_desc_set_channel(struct qbman_pull_desc *d, uint32_t chid,
++ enum qbman_pull_type_e dct);
++
++/**
++ * qbman_swp_pull() - Issue the pull dequeue command
++ * @s: the software portal object.
++ * @d: the software portal descriptor which has been configured with
++ * the set of qbman_pull_desc_set_*() calls.
++ *
++ * Return 0 for success, and -EBUSY if the software portal is not ready
++ * to do pull dequeue.
++ */
++int qbman_swp_pull(struct qbman_swp *s, struct qbman_pull_desc *d);
++
++/* -------------------------------- */
++/* Polling DQRR for dequeue results */
++/* -------------------------------- */
++
++/**
++ * qbman_swp_dqrr_next() - Get a valid DQRR entry.
++ * @s: the software portal object.
++ *
++ * Return NULL if there are no unconsumed DQRR entries. Return a DQRR entry
++ * only once, so repeated calls can return a sequence of DQRR entries, without
++ * requiring they be consumed immediately or in any particular order.
++ */
++const struct qbman_result *qbman_swp_dqrr_next(struct qbman_swp *);
++
++/**
++ * qbman_swp_dqrr_consume() - Consume DQRR entries previously returned from
++ * qbman_swp_dqrr_next().
++ * @s: the software portal object.
++ * @dq: the DQRR entry to be consumed.
++ */
++void qbman_swp_dqrr_consume(struct qbman_swp *s, const struct qbman_result *dq);
++
++/**
++ * qbman_get_dqrr_idx() - Get dqrr index from the given dqrr
++ * @dqrr: the given dqrr object.
++ *
++ * Return dqrr index.
++ */
++uint8_t qbman_get_dqrr_idx(struct qbman_result *dqrr);
++
++/**
++ * qbman_get_dqrr_from_idx() - Use index to get the dqrr entry from the
++ * given portal
++ * @s: the given portal.
++ * @idx: the dqrr index.
++ *
++ * Return dqrr entry object.
++ */
++struct qbman_result *qbman_get_dqrr_from_idx(struct qbman_swp *s, uint8_t idx);
++
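++/* Illustrative DQRR polling sketch (not part of the driver API; 'swp' is an
++ * already-initialised portal and process_fd() is a hypothetical consumer):
++ *
++ *	const struct qbman_result *dq;
++ *
++ *	while ((dq = qbman_swp_dqrr_next(swp)) != NULL) {
++ *		if (qbman_result_is_DQ(dq))
++ *			process_fd(qbman_result_DQ_fd(dq));
++ *		qbman_swp_dqrr_consume(swp, dq);
++ *	}
++ *
++ * qbman_result_is_DQ() and qbman_result_DQ_fd() are declared further down in
++ * this header.
++ */
++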
++/* ------------------------------------------------- */
++/* Polling user-provided storage for dequeue results */
++/* ------------------------------------------------- */
++
++/**
++ * qbman_result_has_new_result() - Check and get the dequeue response from the
++ * dq storage memory set in pull dequeue command
++ * @s: the software portal object.
++ * @dq: the dequeue result read from the memory.
++ *
++ * Only used for user-provided storage of dequeue results, not DQRR. For
++ * efficiency purposes, the driver will perform any required endianness
++ * conversion to ensure that the user's dequeue result storage is in host-endian
++ * format (whether or not that is the same as the little-endian format that
++ * hardware DMA'd to the user's storage). As such, once the user has called
++ * qbman_result_has_new_result() and been returned a valid dequeue result,
++ * they should not call it again on the same memory location (except of course
++ * if another dequeue command has been executed to produce a new result to that
++ * location).
++ *
++ * Return 1 for getting a valid dequeue result, or 0 for not getting a valid
++ * dequeue result.
++ */
++int qbman_result_has_new_result(struct qbman_swp *s,
++ const struct qbman_result *dq);
++
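++/* Illustrative pull-dequeue sketch (not part of the driver API; 'swp', 'fqid',
++ * 'storage' and 'storage_phys' are hypothetical, with 'storage' pointing to
++ * suitably aligned memory that was zeroed beforehand):
++ *
++ *	struct qbman_pull_desc pd;
++ *
++ *	qbman_pull_desc_clear(&pd);
++ *	qbman_pull_desc_set_storage(&pd, storage, storage_phys, 1);
++ *	qbman_pull_desc_set_numframes(&pd, 4);
++ *	qbman_pull_desc_set_token(&pd, 1);
++ *	qbman_pull_desc_set_fq(&pd, fqid);
++ *	if (qbman_swp_pull(swp, &pd))
++ *		return;
++ *
++ * The caller then polls qbman_result_has_new_result() on the storage entries
++ * to detect when the dequeue results have been published, as described above.
++ */
++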
++/* -------------------------------------------------------- */
++/* Parsing dequeue entries (DQRR and user-provided storage) */
++/* -------------------------------------------------------- */
++
++/**
++ * qbman_result_is_DQ() - Check whether the dequeue result is a dequeue response
++ * @dq: the dequeue result to be checked.
++ *
++ * DQRR entries may contain non-dequeue results, ie. notifications
++ */
++int qbman_result_is_DQ(const struct qbman_result *);
++
++/**
++ * qbman_result_is_SCN() - Check whether the dequeue result is a notification
++ * @dq: the dequeue result to be checked.
++ *
++ * All the non-dequeue results (FQDAN/CDAN/CSCN/...) are "state change
++ * notifications" of one type or another. Some APIs apply to all of them, of the
++ * form qbman_result_SCN_***().
++ */
++static inline int qbman_result_is_SCN(const struct qbman_result *dq)
++{
++ return !qbman_result_is_DQ(dq);
++}
++
++/* Recognise different notification types, only required if the user allows for
++ * these to occur, and cares about them when they do.
++ */
++
++/**
++ * qbman_result_is_FQDAN() - Check for FQ Data Availability
++ * @dq: the qbman_result object.
++ *
++ * Return 1 if this is FQDAN.
++ */
++int qbman_result_is_FQDAN(const struct qbman_result *dq);
++
++/**
++ * qbman_result_is_CDAN() - Check for Channel Data Availability
++ * @dq: the qbman_result object to check.
++ *
++ * Return 1 if this is CDAN.
++ */
++int qbman_result_is_CDAN(const struct qbman_result *dq);
++
++/**
++ * qbman_result_is_CSCN() - Check for Congestion State Change
++ * @dq: the qbman_result object to check.
++ *
++ * Return 1 if this is CSCN.
++ */
++int qbman_result_is_CSCN(const struct qbman_result *dq);
++
++/**
++ * qbman_result_is_BPSCN() - Check for Buffer Pool State Change.
++ * @dq: the qbman_result object to check.
++ *
++ * Return 1 if this is BPSCN.
++ */
++int qbman_result_is_BPSCN(const struct qbman_result *dq);
++
++/**
++ * qbman_result_is_CGCU() - Check for Congestion Group Count Update.
++ * @dq: the qbman_result object to check.
++ *
++ * Return 1 if this is CGCU.
++ */
++int qbman_result_is_CGCU(const struct qbman_result *dq);
++
++/* Frame queue state change notifications; (FQDAN in theory counts too as it
++ * leaves a FQ parked, but it is primarily a data availability notification)
++ */
++
++/**
++ * qbman_result_is_FQRN() - Check for FQ Retirement Notification.
++ * @dq: the qbman_result object to check.
++ *
++ * Return 1 if this is FQRN.
++ */
++int qbman_result_is_FQRN(const struct qbman_result *);
++
++/**
++ * qbman_result_is_FQRNI() - Check for FQ Retirement Immediate
++ * @dq: the qbman_result object to check.
++ *
++ * Return 1 if this is FQRNI.
++ */
++int qbman_result_is_FQRNI(const struct qbman_result *);
++
++/**
++ * qbman_result_is_FQPN() - Check for FQ Park Notification
++ * @dq: the qbman_result object to check.
++ *
++ * Return 1 if this is FQPN.
++ */
++int qbman_result_is_FQPN(const struct qbman_result *dq);
++
++/* Parsing frame dequeue results (qbman_result_is_DQ() must be TRUE)
++ */
++/* FQ empty */
++#define QBMAN_DQ_STAT_FQEMPTY 0x80
++/* FQ held active */
++#define QBMAN_DQ_STAT_HELDACTIVE 0x40
++/* FQ force eligible */
++#define QBMAN_DQ_STAT_FORCEELIGIBLE 0x20
++/* Valid frame */
++#define QBMAN_DQ_STAT_VALIDFRAME 0x10
++/* FQ ODP enable */
++#define QBMAN_DQ_STAT_ODPVALID 0x04
++/* Volatile dequeue */
++#define QBMAN_DQ_STAT_VOLATILE 0x02
++/* volatile dequeue command is expired */
++#define QBMAN_DQ_STAT_EXPIRED 0x01
++
++/**
++ * qbman_result_DQ_flags() - Get the STAT field of dequeue response
++ * @dq: the dequeue result.
++ *
++ * Return the state field.
++ */
++uint32_t qbman_result_DQ_flags(const struct qbman_result *dq);
++
++/**
++ * qbman_result_DQ_is_pull() - Check whether the dq response is from a pull
++ * command.
++ * @dq: the dequeue result.
++ *
++ * Return 1 for volatile(pull) dequeue, 0 for static dequeue.
++ */
++static inline int qbman_result_DQ_is_pull(const struct qbman_result *dq)
++{
++ return (int)(qbman_result_DQ_flags(dq) & QBMAN_DQ_STAT_VOLATILE);
++}
++
++/**
++ * qbman_result_DQ_is_pull_complete() - Check whether the pull command is
++ * completed.
++ * @dq: the dequeue result.
++ *
++ * Return boolean.
++ */
++static inline int qbman_result_DQ_is_pull_complete(
++ const struct qbman_result *dq)
++{
++ return (int)(qbman_result_DQ_flags(dq) & QBMAN_DQ_STAT_EXPIRED);
++}
++
++/**
++ * qbman_result_DQ_seqnum() - Get the seqnum field in dequeue response
++ * seqnum is valid only if VALIDFRAME flag is TRUE
++ * @dq: the dequeue result.
++ *
++ * Return seqnum.
++ */
++uint16_t qbman_result_DQ_seqnum(const struct qbman_result *dq);
++
++/**
++ * qbman_result_DQ_odpid() - Get the odpid field in dequeue response
++ * odpid is valid only if ODPVALID flag is TRUE.
++ * @dq: the dequeue result.
++ *
++ * Return odpid.
++ */
++uint16_t qbman_result_DQ_odpid(const struct qbman_result *dq);
++
++/**
++ * qbman_result_DQ_fqid() - Get the fqid in dequeue response
++ * @dq: the dequeue result.
++ *
++ * Return fqid.
++ */
++uint32_t qbman_result_DQ_fqid(const struct qbman_result *dq);
++
++/**
++ * qbman_result_DQ_byte_count() - Get the byte count in dequeue response
++ * @dq: the dequeue result.
++ *
++ * Return the byte count remaining in the FQ.
++ */
++uint32_t qbman_result_DQ_byte_count(const struct qbman_result *dq);
++
++/**
++ * qbman_result_DQ_frame_count - Get the frame count in dequeue response
++ * @dq: the dequeue result.
++ *
++ * Return the frame count remaining in the FQ.
++ */
++uint32_t qbman_result_DQ_frame_count(const struct qbman_result *dq);
++
++/**
++ * qbman_result_DQ_fqd_ctx() - Get the frame queue context in dequeue response
++ * @dq: the dequeue result.
++ *
++ * Return the frame queue context.
++ */
++uint64_t qbman_result_DQ_fqd_ctx(const struct qbman_result *dq);
++
++/**
++ * qbman_result_DQ_fd() - Get the frame descriptor in dequeue response
++ * @dq: the dequeue result.
++ *
++ * Return the frame descriptor.
++ */
++const struct qbman_fd *qbman_result_DQ_fd(const struct qbman_result *dq);
++
++/* State-change notifications (FQDAN/CDAN/CSCN/...). */
++
++/**
++ * qbman_result_SCN_state() - Get the state field in State-change notification
++ * @scn: the state change notification.
++ *
++ * Return the state in the notification.
++ */
++uint8_t qbman_result_SCN_state(const struct qbman_result *scn);
++
++/**
++ * qbman_result_SCN_rid() - Get the resource id from the notification
++ * @scn: the state change notification.
++ *
++ * Return the resource id.
++ */
++uint32_t qbman_result_SCN_rid(const struct qbman_result *scn);
++
++/**
++ * qbman_result_SCN_ctx() - get the context from the notification
++ * @scn: the state change notification.
++ *
++ * Return the context.
++ */
++uint64_t qbman_result_SCN_ctx(const struct qbman_result *scn);
++
++/**
++ * qbman_result_SCN_state_in_mem() - Get the state in notification written
++ * in memory
++ * @scn: the state change notification.
++ *
++ * Return the state.
++ */
++uint8_t qbman_result_SCN_state_in_mem(const struct qbman_result *scn);
++
++/**
++ * qbman_result_SCN_rid_in_mem() - Get the resource id in notification written
++ * in memory.
++ * @scn: the state change notification.
++ *
++ * Return the resource id.
++ */
++uint32_t qbman_result_SCN_rid_in_mem(const struct qbman_result *scn);
++
++
++/* Type-specific "resource IDs". Mainly for illustration purposes, though it
++ * also gives the appropriate type widths.
++ */
++/* Get the FQID from the FQDAN */
++#define qbman_result_FQDAN_fqid(dq) qbman_result_SCN_rid(dq)
++/* Get the FQID from the FQRN */
++#define qbman_result_FQRN_fqid(dq) qbman_result_SCN_rid(dq)
++/* Get the FQID from the FQRNI */
++#define qbman_result_FQRNI_fqid(dq) qbman_result_SCN_rid(dq)
++/* Get the FQID from the FQPN */
++#define qbman_result_FQPN_fqid(dq) qbman_result_SCN_rid(dq)
++/* Get the channel ID from the CDAN */
++#define qbman_result_CDAN_cid(dq) ((uint16_t)qbman_result_SCN_rid(dq))
++/* Get the CGID from the CSCN */
++#define qbman_result_CSCN_cgid(dq) ((uint16_t)qbman_result_SCN_rid(dq))
++
++/**
++ * qbman_result_bpscn_bpid() - Get the bpid from BPSCN
++ * @scn: the state change notification.
++ *
++ * Return the buffer pool id.
++ */
++uint16_t qbman_result_bpscn_bpid(const struct qbman_result *scn);
++
++/**
++ * qbman_result_bpscn_has_free_bufs() - Check whether there are free
++ * buffers in the pool from BPSCN.
++ * @scn: the state change notification.
++ *
++ * Return non-zero if the pool has free buffers, otherwise 0.
++ */
++int qbman_result_bpscn_has_free_bufs(const struct qbman_result *scn);
++
++/**
++ * qbman_result_bpscn_is_depleted() - Check BPSCN to see whether the
++ * buffer pool is depleted.
++ * @scn: the state change notification.
++ *
++ * Return the status of buffer pool depletion.
++ */
++int qbman_result_bpscn_is_depleted(const struct qbman_result *scn);
++
++/**
++ * qbman_result_bpscn_is_surplus() - Check BPSCN to see whether the buffer
++ * pool is surplus or not.
++ * @scn: the state change notification.
++ *
++ * Return the status of buffer pool surplus.
++ */
++int qbman_result_bpscn_is_surplus(const struct qbman_result *scn);
++
++/**
++ * qbman_result_bpscn_ctx() - Get the BPSCN CTX from BPSCN message
++ * @scn: the state change notification.
++ *
++ * Return the BPSCN context.
++ */
++uint64_t qbman_result_bpscn_ctx(const struct qbman_result *scn);
++
++/* Parsing CGCU */
++/**
++ * qbman_result_cgcu_cgid() - Check CGCU resource id, i.e. cgid
++ * @scn: the state change notification.
++ *
++ * Return the CGCU resource id.
++ */
++uint16_t qbman_result_cgcu_cgid(const struct qbman_result *scn);
++
++/**
++ * qbman_result_cgcu_icnt() - Get the I_CNT from CGCU
++ * @scn: the state change notification.
++ *
++ * Return instantaneous count in the CGCU notification.
++ */
++uint64_t qbman_result_cgcu_icnt(const struct qbman_result *scn);
++
++ /************/
++ /* Enqueues */
++ /************/
++
++/**
++ * struct qbman_eq_desc - structure of enqueue descriptor
++ * @dont_manipulate_directly: the 8 32bit data to represent the whole
++ * possible qbman enqueue setting in enqueue descriptor.
++ */
++struct qbman_eq_desc {
++ uint32_t dont_manipulate_directly[8];
++};
++
++/**
++ * struct qbman_eq_response - structure of enqueue response
++ * @dont_manipulate_directly: the 16 32bit data to represent the whole
++ * enqueue response.
++ */
++struct qbman_eq_response {
++ uint32_t dont_manipulate_directly[16];
++};
++
++/**
++ * qbman_eq_desc_clear() - Clear the contents of a descriptor to
++ * default/starting state.
++ * @d: the given enqueue descriptor.
++ */
++void qbman_eq_desc_clear(struct qbman_eq_desc *d);
++
++/* Exactly one of the following descriptor "actions" should be set. (Calling
++ * any one of these will replace the effect of any prior call to one of these.)
++ * - enqueue without order-restoration
++ * - enqueue with order-restoration
++ * - fill a hole in the order-restoration sequence, without any enqueue
++ * - advance NESN (Next Expected Sequence Number), without any enqueue
++ * 'respond_success' indicates whether an enqueue response should be DMA'd
++ * after success (otherwise a response is DMA'd only after failure).
++ * 'incomplete' indicates that other fragments of the same 'seqnum' are yet to
++ * be enqueued.
++ */
++
++/**
++ * qbman_eq_desc_set_no_orp() - Set enqueue descriptor without orp
++ * @d: the enqueue descriptor.
++ * @respond_success: 1 = enqueue with response always; 0 = enqueue with
++ * rejections returned on a FQ.
++ */
++void qbman_eq_desc_set_no_orp(struct qbman_eq_desc *d, int respond_success);
++/**
++ * qbman_eq_desc_set_orp() - Set order-restoration in the enqueue descriptor
++ * @d: the enqueue descriptor.
++ * @respond_success: 1 = enqueue with response always; 0 = enqueue with
++ * rejections returned on a FQ.
++ * @opr_id: the order point record id.
++ * @seqnum: the order restoration sequence number.
++ * @incomplete: indicates that more fragments with the same sequence number
++ * are yet to be enqueued.
++ */
++void qbman_eq_desc_set_orp(struct qbman_eq_desc *d, int respond_success,
++ uint32_t opr_id, uint32_t seqnum, int incomplete);
++
++/**
++ * qbman_eq_desc_set_orp_hole() - fill a hole in the order-restoration sequence
++ * without any enqueue
++ * @d: the enqueue descriptor.
++ * @opr_id: the order point record id.
++ * @seqnum: the order restoration sequence number.
++ */
++void qbman_eq_desc_set_orp_hole(struct qbman_eq_desc *d, uint32_t opr_id,
++ uint32_t seqnum);
++
++/**
++ * qbman_eq_desc_set_orp_nesn() - advance NESN (Next Expected Sequence Number)
++ * without any enqueue
++ * @d: the enqueue descriptor.
++ * @opr_id: the order point record id.
++ * @seqnum: the order restoration sequence number.
++ */
++void qbman_eq_desc_set_orp_nesn(struct qbman_eq_desc *d, uint32_t opr_id,
++ uint32_t seqnum);
++/**
++ * qbman_eq_desc_set_response() - Set the enqueue response info.
++ * @d: the enqueue descriptor
++ * @storage_phys: the physical address of the enqueue response in memory.
++ * @stash: indicate whether write allocation is enabled or not.
++ *
++ * In the case where an enqueue response is DMA'd, this determines where that
++ * response should go. (The physical/DMA address is given for hardware's
++ * benefit, but software should interpret it as a "struct qbman_eq_response"
++ * data structure.) 'stash' controls whether or not the write to main-memory
++ * expresses a cache-warming attribute.
++ */
++void qbman_eq_desc_set_response(struct qbman_eq_desc *d,
++ dma_addr_t storage_phys,
++ int stash);
++
++/**
++ * qbman_eq_desc_set_token() - Set token for the enqueue command
++ * @d: the enqueue descriptor
++ * @token: the token to be set.
++ *
++ * token is the value that shows up in an enqueue response that can be used to
++ * detect when the results have been published. The easiest technique is to zero
++ * result "storage" before issuing an enqueue, and use any non-zero 'token'
++ * value.
++ */
++void qbman_eq_desc_set_token(struct qbman_eq_desc *d, uint8_t token);
++
++/**
++ * Exactly one of the following descriptor "targets" should be set. (Calling any
++ * one of these will replace the effect of any prior call to one of these.)
++ * - enqueue to a frame queue
++ * - enqueue to a queuing destination
++ * Note that none of these will have any effect if the "action" type has been
++ * set to "orp_hole" or "orp_nesn".
++ */
++/**
++ * qbman_eq_desc_set_fq() - Set Frame Queue id for the enqueue command
++ * @d: the enqueue descriptor
++ * @fqid: the id of the frame queue to be enqueued.
++ */
++void qbman_eq_desc_set_fq(struct qbman_eq_desc *d, uint32_t fqid);
++
++/**
++ * qbman_eq_desc_set_qd() - Set Queuing Destination for the enqueue command.
++ * @d: the enqueue descriptor
++ * @qdid: the id of the queuing destination to be enqueued.
++ * @qd_bin: the queuing destination bin
++ * @qd_prio: the queuing destination priority.
++ */
++void qbman_eq_desc_set_qd(struct qbman_eq_desc *d, uint32_t qdid,
++ uint32_t qd_bin, uint32_t qd_prio);
++
++/**
++ * qbman_eq_desc_set_eqdi() - enable/disable EQDI interrupt
++ * @d: the enqueue descriptor
++ * @enable: boolean to enable/disable EQDI
++ *
++ * Determines whether or not the portal's EQDI interrupt source should be
++ * asserted after the enqueue command is completed.
++ */
++void qbman_eq_desc_set_eqdi(struct qbman_eq_desc *d, int enable);
++
++/**
++ * qbman_eq_desc_set_dca() - Set DCA mode in the enqueue command.
++ * @d: the enqueue descriptor.
++ * @enable: enabled/disable DCA mode.
++ * @dqrr_idx: DCAP_CI, the DCAP consumer index.
++ * @park: determines whether to park the FQ or not
++ *
++ * Determines whether or not a portal DQRR entry should be consumed once the
++ * enqueue command is completed. (And if so, and the DQRR entry corresponds to a
++ * held-active (order-preserving) FQ, whether the FQ should be parked instead of
++ * being rescheduled.)
++ */
++void qbman_eq_desc_set_dca(struct qbman_eq_desc *d, int enable,
++ uint32_t dqrr_idx, int park);
++
++/**
++ * qbman_swp_enqueue() - Issue an enqueue command.
++ * @s: the software portal used for enqueue.
++ * @d: the enqueue descriptor.
++ * @fd: the frame descriptor to be enqueued.
++ *
++ * Please note that 'fd' should only be NULL if the "action" of the
++ * descriptor is "orp_hole" or "orp_nesn".
++ *
++ * Return 0 for a successful enqueue, -EBUSY if the EQCR is not ready.
++ */
++int qbman_swp_enqueue(struct qbman_swp *s, const struct qbman_eq_desc *d,
++ const struct qbman_fd *fd);
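Taken together, the descriptor helpers above compose into a short enqueue flow: clear the descriptor, choose exactly one action and one target, then issue the command on the portal. A minimal editorial sketch (not part of the patch); the example_enqueue_to_fq() name is invented here, and swp, fqid and fd are assumed to be supplied by the caller.

#include <errno.h>
#include "fsl_qbman_portal.h"

/* Editorial sketch: enqueue one frame descriptor to a frame queue. */
static int example_enqueue_to_fq(struct qbman_swp *swp, uint32_t fqid,
				 const struct qbman_fd *fd)
{
	struct qbman_eq_desc d;
	int ret;

	qbman_eq_desc_clear(&d);		/* default/starting state */
	qbman_eq_desc_set_no_orp(&d, 0);	/* no ORP; response DMA'd only on failure */
	qbman_eq_desc_set_fq(&d, fqid);		/* target is a frame queue */

	do {
		ret = qbman_swp_enqueue(swp, &d, fd);
	} while (ret == -EBUSY);		/* EQCR not ready, retry */

	return ret;
}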
++
++/* TODO:
++ * qbman_swp_enqueue_thresh() - Set threshold for EQRI interrupt.
++ * @s: the software portal.
++ * @thresh: the threshold to trigger the EQRI interrupt.
++ *
++ * An EQRI interrupt can be generated when the fill-level of EQCR falls below
++ * the 'thresh' value set here. Setting thresh==0 (the default) disables.
++ */
++int qbman_swp_enqueue_thresh(struct qbman_swp *s, unsigned int thresh);
++
++ /*******************/
++ /* Buffer releases */
++ /*******************/
++/**
++ * struct qbman_release_desc - The structure for buffer release descriptor
++ * @dont_manipulate_directly: the 32bit data to represent the whole
++ * possible settings of qbman release descriptor.
++ */
++struct qbman_release_desc {
++ uint32_t dont_manipulate_directly[1];
++};
++
++/**
++ * qbman_release_desc_clear() - Clear the contents of a descriptor to
++ * default/starting state.
++ * @d: the qbman release descriptor.
++ */
++void qbman_release_desc_clear(struct qbman_release_desc *d);
++
++/**
++ * qbman_release_desc_set_bpid() - Set the ID of the buffer pool to release to
++ * @d: the qbman release descriptor.
++ */
++void qbman_release_desc_set_bpid(struct qbman_release_desc *d, uint32_t bpid);
++
++/**
++ * qbman_release_desc_set_rcdi() - Determines whether or not the portal's RCDI
++ * interrupt source should be asserted after the release command is completed.
++ * @d: the qbman release descriptor.
++ */
++void qbman_release_desc_set_rcdi(struct qbman_release_desc *d, int enable);
++
++/**
++ * qbman_swp_release() - Issue a buffer release command.
++ * @s: the software portal object.
++ * @d: the release descriptor.
++ * @buffers: a pointer pointing to the buffer address to be released.
++ * @num_buffers: number of buffers to be released, must be less than 8.
++ *
++ * Return 0 for success, -EBUSY if the release command ring is not ready.
++ */
++int qbman_swp_release(struct qbman_swp *s, const struct qbman_release_desc *d,
++ const uint64_t *buffers, unsigned int num_buffers);
++
++/* TODO:
++ * qbman_swp_release_thresh() - Set threshold for RCRI interrupt
++ * @s: the software portal.
++ * @thresh: the threshold.
++ * An RCRI interrupt can be generated when the fill-level of RCR falls below
++ * the 'thresh' value set here. Setting thresh==0 (the default) disables.
++ */
++int qbman_swp_release_thresh(struct qbman_swp *s, unsigned int thresh);
++
++ /*******************/
++ /* Buffer acquires */
++ /*******************/
++/**
++ * qbman_swp_acquire() - Issue a buffer acquire command.
++ * @s: the software portal object.
++ * @bpid: the buffer pool index.
++ * @buffers: a pointer to storage for the acquired buffer address(es).
++ * @num_buffers: number of buffers to be acquired, must be less than 8.
++ *
++ * Return 0 for success, or negative error code if the acquire command
++ * fails.
++ */
++int qbman_swp_acquire(struct qbman_swp *s, uint32_t bpid, uint64_t *buffers,
++ unsigned int num_buffers);
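The release and acquire commands are symmetric and both are limited to fewer than 8 buffers per call. Below is a hedged editorial sketch (not part of the patch); example_release_acquire() is an invented name, and swp, bpid and the buffer addresses are assumed to come from the caller. Note that the driver code later in this patch treats a positive return from qbman_swp_acquire() as the number of buffers acquired.

#include <errno.h>
#include "fsl_qbman_portal.h"

/* Editorial sketch: hand two buffers back to a pool, then pull one out. */
static int example_release_acquire(struct qbman_swp *swp, uint32_t bpid,
				   uint64_t buf0, uint64_t buf1)
{
	struct qbman_release_desc rd;
	uint64_t bufs[2] = { buf0, buf1 };
	int ret;

	qbman_release_desc_clear(&rd);
	qbman_release_desc_set_bpid(&rd, bpid);
	do {
		ret = qbman_swp_release(swp, &rd, bufs, 2);
	} while (ret == -EBUSY);	/* release command ring not ready, retry */

	ret = qbman_swp_acquire(swp, bpid, bufs, 1);
	return (ret <= 0) ? -1 : 0;	/* <= 0: nothing was acquired */
}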
++
++ /*****************/
++ /* FQ management */
++ /*****************/
++/**
++ * qbman_swp_fq_schedule() - Move the fq to the scheduled state.
++ * @s: the software portal object.
++ * @fqid: the index of frame queue to be scheduled.
++ *
++ * There are a couple of different ways that a FQ can end up in the parked
++ * state; this schedules it.
++ *
++ * Return 0 for success, or negative error code for failure.
++ */
++int qbman_swp_fq_schedule(struct qbman_swp *s, uint32_t fqid);
++
++/**
++ * qbman_swp_fq_force() - Force the FQ to fully scheduled state.
++ * @s: the software portal object.
++ * @fqid: the index of frame queue to be forced.
++ *
++ * Force eligible will force a tentatively-scheduled FQ to be fully-scheduled
++ * and thus be available for selection by any channel-dequeuing behaviour (push
++ * or pull). If the FQ is subsequently "dequeued" from the channel and is still
++ * empty at the time this happens, the resulting dq_entry will have no FD.
++ * (qbman_result_DQ_fd() will return NULL.)
++ *
++ * Return 0 for success, or negative error code for failure.
++ */
++int qbman_swp_fq_force(struct qbman_swp *s, uint32_t fqid);
++
++/**
++ * These functions change the FQ flow-control stuff between XON/XOFF. (The
++ * default is XON.) This setting doesn't affect enqueues to the FQ, just
++ * dequeues. XOFF FQs will remain in the tentatively-scheduled state, even when
++ * non-empty, meaning they won't be selected for scheduled dequeuing. If a FQ is
++ * changed to XOFF after it had already become truly-scheduled to a channel, and
++ * a pull dequeue of that channel occurs that selects that FQ for dequeuing,
++ * then the resulting dq_entry will have no FD. (qbman_result_DQ_fd() will
++ * return NULL.)
++ */
++/**
++ * qbman_swp_fq_xon() - XON the frame queue.
++ * @s: the software portal object.
++ * @fqid: the index of frame queue.
++ *
++ * Return 0 for success, or negative error code for failure.
++ */
++int qbman_swp_fq_xon(struct qbman_swp *s, uint32_t fqid);
++/**
++ * qbman_swp_fq_xoff() - XOFF the frame queue.
++ * @s: the software portal object.
++ * @fqid: the index of frame queue.
++ *
++ * Return 0 for success, or negative error code for failure.
++ */
++int qbman_swp_fq_xoff(struct qbman_swp *s, uint32_t fqid);
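As a brief illustration of the flow-control pair (editorial sketch; the example_pause_fq() name is invented): stop scheduled dequeues from a FQ, do whatever maintenance is needed, then resume.

/* Editorial sketch: pause and resume scheduled dequeues for one FQ;
 * enqueues to the FQ are unaffected. */
static void example_pause_fq(struct qbman_swp *swp, uint32_t fqid)
{
	if (qbman_swp_fq_xoff(swp, fqid) == 0) {
		/* ... drain or reconfigure the consumer here ... */
		qbman_swp_fq_xon(swp, fqid);
	}
}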
++
++ /**********************/
++ /* Channel management */
++ /**********************/
++
++/**
++ * If the user has been allocated a channel object that is going to generate
++ * CDANs to another channel, then these functions will be necessary.
++ * CDAN-enabled channels only generate a single CDAN notification, after which
++ * they need to be re-enabled before they will generate another. (The idea is
++ * that pull dequeuing will occur in reaction to the CDAN, followed by a
++ * re-enable step.) Each function generates a distinct command to hardware, so a
++ * combination function is provided if the user wishes to modify the "context"
++ * (which shows up in each CDAN message) each time they reenable, as a single
++ * command to hardware.
++ */
++
++/**
++ * qbman_swp_CDAN_set_context() - Set CDAN context
++ * @s: the software portal object.
++ * @channelid: the channel index.
++ * @ctx: the context to be set in CDAN.
++ *
++ * Return 0 for success, or negative error code for failure.
++ */
++int qbman_swp_CDAN_set_context(struct qbman_swp *s, uint16_t channelid,
++ uint64_t ctx);
++
++/**
++ * qbman_swp_CDAN_enable() - Enable CDAN for the channel.
++ * @s: the software portal object.
++ * @channelid: the index of the channel to generate CDAN.
++ *
++ * Return 0 for success, or negative error code for failure.
++ */
++int qbman_swp_CDAN_enable(struct qbman_swp *s, uint16_t channelid);
++
++/**
++ * qbman_swp_CDAN_disable() - disable CDAN for the channel.
++ * @s: the software portal object.
++ * @channelid: the index of the channel to generate CDAN.
++ *
++ * Return 0 for success, or negative error code for failure.
++ */
++int qbman_swp_CDAN_disable(struct qbman_swp *s, uint16_t channelid);
++
++/**
++ * qbman_swp_CDAN_set_context_enable() - Set CDAN context and enable CDAN
++ * @s: the software portal object.
++ * @channelid: the index of the channel to generate CDAN.
++ * @ctx: the context set in CDAN.
++ *
++ * Return 0 for success, or negative error code for failure.
++ */
++int qbman_swp_CDAN_set_context_enable(struct qbman_swp *s, uint16_t channelid,
++ uint64_t ctx);
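Tying the CDAN calls together (editorial sketch, not part of the patch; the example_* names are invented): arm the channel once with a context, then re-arm it after each single-shot notification has been serviced.

/* Editorial sketch: initial arming of a channel for CDAN. */
static int example_arm_cdan(struct qbman_swp *swp, uint16_t channelid,
			    uint64_t ctx)
{
	/* one hardware command: set the context and enable CDAN */
	return qbman_swp_CDAN_set_context_enable(swp, channelid, ctx);
}

/* Editorial sketch: re-arm after the CDAN fired and dequeues were pulled. */
static int example_rearm_cdan(struct qbman_swp *swp, uint16_t channelid)
{
	return qbman_swp_CDAN_enable(swp, channelid);
}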
++int qbman_swp_fill_ring(struct qbman_swp *s,
++ const struct qbman_eq_desc *d,
++ const struct qbman_fd *fd,
++ uint8_t burst_index);
++int qbman_swp_flush_ring(struct qbman_swp *s);
++void qbman_sync(void);
++int qbman_swp_send_multiple(struct qbman_swp *s,
++ const struct qbman_eq_desc *d,
++ const struct qbman_fd *fd,
++ int frames_to_send);
++
++int qbman_check_command_complete(struct qbman_swp *s,
++ const struct qbman_result *dq);
++#endif /* !_FSL_QBMAN_PORTAL_H */
+diff --git a/drivers/net/dpaa2/rte_eth_dpaa2_pvt.h b/drivers/net/dpaa2/rte_eth_dpaa2_pvt.h
+new file mode 100644
+index 0000000..b35c3ee
+--- /dev/null
++++ b/drivers/net/dpaa2/rte_eth_dpaa2_pvt.h
+@@ -0,0 +1,313 @@
++/*-
++ * BSD LICENSE
++ *
++ * Copyright(c) 2014 Freescale Semiconductor. All rights reserved.
++ * All rights reserved.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions
++ * are met:
++ *
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in
++ * the documentation and/or other materials provided with the
++ * distribution.
++ * * Neither the name of Freescale Semiconductor nor the names of its
++ * contributors may be used to endorse or promote products derived
++ * from this software without specific prior written permission.
++ *
++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
++ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
++ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
++ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
++ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
++ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
++ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
++ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
++ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
++ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++#ifndef _RTE_ETH_DPAA2_PVT_H_
++#define _RTE_ETH_DPAA2_PVT_H_
++
++#include <rte_memory.h>
++#include <rte_mbuf.h>
++#include <rte_atomic.h>
++#include <fsl_mc_sys.h>
++#include <eal_vfio.h>
++#include <eal_vfio_fsl_mc.h>
++
++typedef uint64_t dma_addr_t;
++
++#define FALSE 0
++#define TRUE 1
++#ifndef false
++#define false FALSE
++#endif
++#ifndef true
++#define true TRUE
++#endif
++#define lower_32_bits(x) ((uint32_t)(x))
++#define upper_32_bits(x) ((uint32_t)(((x) >> 16) >> 16))
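The double 16-bit shift in upper_32_bits() is presumably there to avoid a full 32-bit shift when the macro is applied to 32-bit-wide arguments; for 64-bit values the two halves recombine as expected. A small editorial example (invented function name):

/* Editorial example: splitting and recombining a 64-bit address. */
static uint64_t example_split_recombine(void)
{
	uint64_t addr = 0x123456789abcdef0ULL;
	uint32_t lo = lower_32_bits(addr);	/* 0x9abcdef0 */
	uint32_t hi = upper_32_bits(addr);	/* 0x12345678 */

	return ((uint64_t)hi << 32) | lo;	/* == addr */
}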
++
++#ifndef ETH_ADDR_LEN
++#define ETH_ADDR_LEN 6
++#endif
++#ifndef ETH_VLAN_HLEN
++#define ETH_VLAN_HLEN 4 /** < Vlan Header Length */
++#endif
++
++#define NUM_MAX_RECV_FRAMES 16
++
++#define MC_PORTAL_INDEX 0
++#define NUM_DPIO_REGIONS 2
++#define NUM_DQS_PER_QUEUE 2
++#define MC_PORTALS_BASE_PADDR 0x00080C000000ULL
++#define MC_PORTAL_STRIDE 0x10000
++#define MC_PORTAL_SIZE 64
++#define MC_PORTAL_ID_TO_PADDR(portal_id) \
++(MC_PORTALS_BASE_PADDR + (portal_id) * MC_PORTAL_STRIDE)
++
++struct dpaa2_dpio_dev {
++ TAILQ_ENTRY(dpaa2_dpio_dev) next; /**< Pointer to Next device instance */
++	uint16_t index; /**< Index of an instance in the list */
++ rte_atomic16_t ref_count; /**< How many thread contexts are sharing this.*/
++ struct fsl_mc_io *dpio; /** handle to DPIO portal object */
++ uint16_t token;
++ struct qbman_swp *sw_portal; /** SW portal object */
++ const struct qbman_result *dqrr[4]; /**< DQRR Entry for this SW portal */
++ pthread_mutex_t lock; /** Required when Portal is shared */
++ void *mc_portal; /**< MC Portal for configuring this device */
++ uintptr_t qbman_portal_ce_paddr; /**< Physical address of Cache Enabled Area */
++ uintptr_t ce_size; /**< Size of the CE region */
++ uintptr_t qbman_portal_ci_paddr; /**< Physical address of Cache Inhibit Area */
++ uintptr_t ci_size; /**< Size of the CI region */
++ void *intr_handle;
++ int32_t vfio_fd; /**< File descriptor received via VFIO */
++ int32_t hw_id; /**< An unique ID of this DPIO device instance */
++};
++
++struct queue_storage_info_t {
++ struct qbman_result *dq_storage[NUM_DQS_PER_QUEUE];
++ struct qbman_result *active_dqs;
++ int toggle;
++};
++
++struct thread_io_info_t {
++ struct dpaa2_dpio_dev *dpio_dev;
++ struct dpaa2_dpio_dev *sec_dpio_dev;
++ struct qbman_result *global_active_dqs;
++};
++
++/*! Global per thread DPIO portal */
++extern __thread struct thread_io_info_t thread_io_info;
++/*! Global MCP list */
++extern void *(*mcp_ptr_list);
++
++/* Refer to Table 7-3 in SEC BG */
++struct qbman_fle {
++ uint32_t addr_lo;
++ uint32_t addr_hi;
++ uint32_t length;
++ /* FMT must be 00, MSB is final bit */
++ uint32_t fin_bpid_offset;
++ uint32_t frc;
++ uint32_t reserved[3]; /* Not used currently */
++};
++
++/* Maximum release/acquire from QBMAN */
++#define DPAA2_MBUF_MAX_ACQ_REL 7
++
++#define MAX_BPID 256
++
++/*Macros to define operations on FD*/
++#define DPAA2_SET_FD_ADDR(fd, addr) \
++ fd->simple.addr_lo = lower_32_bits((uint64_t)addr); \
++ fd->simple.addr_hi = upper_32_bits((uint64_t)addr);
++#define DPAA2_SET_FD_LEN(fd, length) fd->simple.len = length
++#define DPAA2_SET_FD_BPID(fd, bpid) fd->simple.bpid_offset |= bpid;
++#define DPAA2_SET_FD_IVP(fd) ((fd->simple.bpid_offset |= 0x00004000))
++#define DPAA2_SET_FD_OFFSET(fd, offset) (fd->simple.bpid_offset |= (uint32_t)(offset) << 16);
++#define DPAA2_SET_FD_FRC(fd, frc) fd->simple.frc = frc;
++#define DPAA2_RESET_FD_CTRL(fd) fd->simple.ctrl = 0;
++
++#define DPAA2_SET_FD_ASAL(fd, asal) (fd->simple.ctrl |= (asal << 16))
++#define DPAA2_SET_FD_FLC(fd, addr) \
++ fd->simple.flc_lo = lower_32_bits((uint64_t)addr); \
++ fd->simple.flc_hi = upper_32_bits((uint64_t)addr);
++#define DPAA2_GET_FLE_ADDR(fle) \
++ (uint64_t)((((uint64_t)(fle->addr_hi)) << 32) + fle->addr_lo)
++#define DPAA2_SET_FLE_ADDR(fle, addr) \
++ fle->addr_lo = lower_32_bits((uint64_t)addr); \
++ fle->addr_hi = upper_32_bits((uint64_t)addr);
++#define DPAA2_SET_FLE_OFFSET(fle, offset) (fle)->fin_bpid_offset |= (uint32_t)(offset) << 16;
++#define DPAA2_SET_FLE_BPID(fle, bpid) (fle)->fin_bpid_offset |= (uint64_t)bpid;
++#define DPAA2_GET_FLE_BPID(fle, bpid) (fle->fin_bpid_offset & 0x000000ff)
++#define DPAA2_SET_FLE_FIN(fle) fle->fin_bpid_offset |= (uint64_t)1 << 31;
++#define DPAA2_SET_FLE_IVP(fle) (((fle)->fin_bpid_offset |= 0x00004000))
++#define DPAA2_SET_FD_COMPOUND_FMT(fd) \
++ fd->simple.bpid_offset |= (uint32_t)1 << 28;
++#define DPAA2_GET_FD_ADDR(fd) \
++ (uint64_t)((((uint64_t)(fd->simple.addr_hi)) << 32) + fd->simple.addr_lo)
++#define DPAA2_GET_FD_LEN(fd) (fd->simple.len)
++#define DPAA2_GET_FD_BPID(fd) ((fd->simple.bpid_offset & 0x00003FFF))
++#define DPAA2_GET_FD_IVP(fd) ((fd->simple.bpid_offset & 0x00004000) >> 14)
++#define DPAA2_GET_FD_OFFSET(fd) ((fd->simple.bpid_offset & 0x0FFF0000) >> 16)
++#define DPAA2_GET_FD_FRC(fd) (fd->simple.frc)
++#define DPAA2_GET_FD_FLC(fd) \
++ (uint64_t)((((uint64_t)(fd->simple.flc_hi)) << 32) + fd->simple.flc_lo)
++
++#define DPAA2_SET_FLE_SG_EXT(fle) fle->fin_bpid_offset |= (uint64_t)1<<29;
++#define DPAA2_IS_SET_FLE_SG_EXT(fle) \
++ (fle->fin_bpid_offset & ((uint64_t)1<<29))? 1 : 0
++
++#define DPAA2_INLINE_MBUF_FROM_BUF(buf) \
++ ((struct rte_mbuf *)((uint64_t)buf + DPAA2_FD_PTA_SIZE + DPAA2_MBUF_HW_ANNOTATION + DPAA2_RES))
++#define DPAA2_BUF_FROM_INLINE_MBUF(mbuf) \
++ ((uint8_t *)((uint64_t)mbuf - (DPAA2_FD_PTA_SIZE + DPAA2_MBUF_HW_ANNOTATION + DPAA2_RES)))
++
++#define DPAA2_ASAL_VAL (DPAA2_MBUF_HW_ANNOTATION / 64)
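These accessors are how the PMD builds and reads frame descriptors; eth_mbuf_to_fd() later in this patch follows the same pattern. A condensed editorial sketch (the example_fill_fd() name is invented; buf, bpid, len and offset are assumptions of the example):

/* Editorial sketch: populate a frame descriptor for one buffer. */
static void example_fill_fd(struct qbman_fd *fd, void *buf, uint16_t bpid,
			    uint32_t len, uint16_t offset)
{
	/* the BPID/offset bits are OR'ed in, so reset the word first */
	fd->simple.bpid_offset = 0;
	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(buf));
	DPAA2_SET_FD_LEN(fd, len);
	DPAA2_SET_FD_BPID(fd, bpid);
	DPAA2_SET_FD_OFFSET(fd, offset);
}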
++
++/*Macros to define QBMAN enqueue options */
++#define DPAA2_ETH_EQ_DISABLE 0 /*!< Dont Enqueue the Frame */
++#define DPAA2_ETH_EQ_RESP_ON_SUCC 1 /*!< Enqueue the Frame with
++ response after success*/
++#define DPAA2_ETH_EQ_RESP_ON_FAIL 2 /*!< Enqueue the Frame with
++ response after failure*/
++#define DPAA2_ETH_EQ_NO_RESP 3 /*!< Enqueue the Frame without
++ response*/
++/* Only Enqueue Error responses will be
++ * pushed on FQID_ERR of Enqueue FQ */
++#define DPAA2_EQ_RESP_ERR_FQ 0
++/* All Enqueue responses will be pushed on address
++ * set with qbman_eq_desc_set_response */
++#define DPAA2_EQ_RESP_ALWAYS 1
++
++#define DPAA2_MAX_BUF_POOLS 8
++
++struct dpbp_node {
++ struct dpbp_node *next;
++ struct fsl_mc_io dpbp;
++ uint16_t token;
++ int dpbp_id;
++};
++
++struct buf_pool_cfg {
++ void *addr; /*!< The address from where DPAA2 will carve out the
++ * buffers. 'addr' should be 'NULL' if user wants
++ * to create buffers from the memory which user
++ * asked DPAA2 to reserve during 'nadk init' */
++ phys_addr_t phys_addr; /*!< corresponding physical address
++ * of the memory provided in addr */
++ uint32_t num; /*!< number of buffers */
++ uint32_t size; /*!< size of each buffer. 'size' should include
++ * any headroom to be reserved and alignment */
++ uint16_t align; /*!< Buffer alignment (in bytes) */
++ uint16_t bpid; /*!< The buffer pool id. This will be filled
++ *in by DPAA2 for each buffer pool */
++};
++
++struct buf_pool {
++ uint32_t size;
++ uint32_t num_bufs;
++ uint16_t bpid;
++ uint8_t *h_bpool_mem;
++ struct rte_mempool *mp;
++ struct dpbp_node *dpbp_node;
++};
++
++/*!
++ * Buffer pool list configuration structure. The user needs to give DPAA2 a
++ * valid number of 'num_buf_pools'.
++ */
++struct dpaa2_bp_list_cfg {
++ struct buf_pool_cfg buf_pool; /* Configuration
++ * of each buffer pool */
++};
++
++struct dpaa2_bp_list {
++ struct dpaa2_bp_list *next;
++ struct rte_mempool *mp;
++ struct buf_pool buf_pool;
++};
++
++struct bp_info {
++ uint32_t size;
++ uint32_t meta_data_size;
++ struct dpaa2_bp_list *bp_list;
++};
++
++extern struct dpaa2_bp_list *h_bp_list;
++
++//todo - this is costly, need to write a fast conversion routine
++static void *dpaa2_mem_ptov(phys_addr_t paddr)
++{
++ const struct rte_memseg *memseg = rte_eal_get_physmem_layout();
++ int i;
++
++ for (i = 0; i < RTE_MAX_MEMSEG && memseg[i].addr_64 != 0; i++) {
++ if (paddr >= memseg[i].phys_addr &&
++ (char *)paddr < (char *)memseg[i].phys_addr + memseg[i].len)
++ return (void *)(memseg[i].addr_64 + (paddr - memseg[i].phys_addr));
++ }
++ return NULL;
++}
++
++static phys_addr_t dpaa2_mem_vtop(uint64_t vaddr)
++{
++ const struct rte_memseg *memseg = rte_eal_get_physmem_layout();
++ int i;
++
++ for (i = 0; i < RTE_MAX_MEMSEG && memseg[i].addr_64 != 0; i++) {
++ if (vaddr >= memseg[i].addr_64 &&
++ vaddr < memseg[i].addr_64 + memseg[i].len)
++ return memseg[i].phys_addr + (vaddr - memseg[i].addr_64);
++ }
++ return (phys_addr_t)(NULL);
++}
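A quick editorial sanity check of the two linear-scan converters above; they are inverses for any address that falls inside a registered memseg (the example_addr_round_trip() name is invented):

/* Editorial sketch: round-trip a virtual address through the converters. */
static int example_addr_round_trip(void *vaddr)
{
	phys_addr_t pa = dpaa2_mem_vtop((uint64_t)vaddr);

	if (pa == 0)	/* address not covered by any memseg */
		return -1;
	return (dpaa2_mem_ptov(pa) == vaddr) ? 0 : -1;
}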
++
++#ifdef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA
++/*
++ * When we are using physical addresses as IO virtual addresses,
++ * we call the conversion routines dpaa2_mem_vtop & dpaa2_mem_ptov wherever
++ * required. These routines are invoked through the macros below.
++ */
++
++#define DPAA2_MBUF_VADDR_TO_IOVA(mbuf) (mbuf->buf_physaddr)
++
++/**
++ * macro to convert Virtual address to IOVA
++ */
++#define DPAA2_VADDR_TO_IOVA(_vaddr) dpaa2_mem_vtop((uint64_t)(_vaddr))
++
++/**
++ * macro to convert IOVA to Virtual address
++ */
++#define DPAA2_IOVA_TO_VADDR(_iova) dpaa2_mem_ptov((phys_addr_t)(_iova))
++
++/**
++ * macro to modify, in place, memory containing a virtual address so that it holds the IOVA
++ */
++#define DPAA2_MODIFY_VADDR_TO_IOVA(_mem, _type) \
++ {_mem = (_type)(dpaa2_mem_vtop((uint64_t)(_mem))); }
++
++/**
++ * macro to modify, in place, memory containing an IOVA so that it holds the virtual address
++ */
++#define DPAA2_MODIFY_IOVA_TO_VADDR(_mem, _type) \
++ {_mem = (_type)(dpaa2_mem_ptov((phys_addr_t)(_mem))); }
++
++#else
++#define DPAA2_MBUF_VADDR_TO_IOVA(mbuf) (mbuf->buf_addr)
++
++#define DPAA2_VADDR_TO_IOVA(_vaddr) (_vaddr)
++#define DPAA2_IOVA_TO_VADDR(_iova) (_iova)
++#define DPAA2_MODIFY_VADDR_TO_IOVA(_mem, _type)
++#define DPAA2_MODIFY_IOVA_TO_VADDR(_mem, _type)
++#endif
++
++#endif
+diff --git a/drivers/net/dpaa2/rte_eth_dpbp.c b/drivers/net/dpaa2/rte_eth_dpbp.c
+new file mode 100644
+index 0000000..6a7617d
+--- /dev/null
++++ b/drivers/net/dpaa2/rte_eth_dpbp.c
+@@ -0,0 +1,430 @@
++/*-
++ * BSD LICENSE
++ *
++ * Copyright(c) 2014 Freescale Semiconductor. All rights reserved.
++ * All rights reserved.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions
++ * are met:
++ *
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in
++ * the documentation and/or other materials provided with the
++ * distribution.
++ * * Neither the name of Freescale Semiconductor nor the names of its
++ * contributors may be used to endorse or promote products derived
++ * from this software without specific prior written permission.
++ *
++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
++ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
++ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
++ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
++ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
++ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
++ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
++ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
++ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
++ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++#include <unistd.h>
++#include <stdio.h>
++#include <sys/types.h>
++#include <string.h>
++#include <stdlib.h>
++#include <fcntl.h>
++#include <errno.h>
++#include <sys/ioctl.h>
++#include <sys/stat.h>
++#include <sys/types.h>
++#include <sys/mman.h>
++#include <sys/vfs.h>
++#include <libgen.h>
++#include <rte_mbuf.h>
++
++#include "rte_pci.h"
++#include "rte_memzone.h"
++
++#include "rte_eth_dpaa2_pvt.h"
++#include "fsl_qbman_portal.h"
++#include <fsl_dpbp.h>
++
++#include <rte_log.h>
++#include "dpaa2_logs.h"
++
++static struct dpbp_node *g_dpbp_list;
++static struct dpbp_node *avail_dpbp;
++
++struct bp_info bpid_info[MAX_BPID];
++
++struct dpaa2_bp_list *h_bp_list;
++
++int
++dpaa2_create_dpbp_device(
++ int dpbp_id)
++{
++ struct dpbp_node *dpbp_node;
++ int ret;
++
++ /* Allocate DPAA2 dpbp handle */
++ dpbp_node = (struct dpbp_node *)malloc(sizeof(struct dpbp_node));
++ if (!dpbp_node) {
++ PMD_DRV_LOG(ERR, "Memory allocation failed for DPBP Device\n");
++ return -1;
++ }
++
++ /* Open the dpbp object */
++ dpbp_node->dpbp.regs = mcp_ptr_list[MC_PORTAL_INDEX];
++ ret = dpbp_open(&dpbp_node->dpbp, CMD_PRI_LOW, dpbp_id, &dpbp_node->token);
++ if (ret) {
++ PMD_DRV_LOG(ERR, "Resource allocation failure with err code: %d",
++ ret);
++ free(dpbp_node);
++ return -1;
++ }
++
++ /* Clean the device first */
++ ret = dpbp_reset(&dpbp_node->dpbp, CMD_PRI_LOW, dpbp_node->token);
++ if (ret) {
++		PMD_DRV_LOG(ERR, "Failure cleaning dpbp device with "
++			    "error code %d\n", ret);
++ return -1;
++ }
++
++ dpbp_node->dpbp_id = dpbp_id;
++ /* Add the dpbp handle into the global list */
++ dpbp_node->next = g_dpbp_list;
++ g_dpbp_list = dpbp_node;
++ avail_dpbp = g_dpbp_list;
++
++ PMD_DRV_LOG(INFO, "Buffer resource initialized\n");
++
++ return 0;
++}
++
++int hw_mbuf_create_pool(struct rte_mempool *mp)
++{
++ struct dpaa2_bp_list *bp_list;
++ struct dpbp_attr dpbp_attr;
++ int ret;
++
++ if (!avail_dpbp) {
++ PMD_DRV_LOG(ERR, "DPAA2 resources not available\n");
++ return -1;
++ }
++
++ ret = dpbp_enable(&avail_dpbp->dpbp, CMD_PRI_LOW, avail_dpbp->token);
++ if (ret != 0) {
++		PMD_DRV_LOG(ERR, "Resource enable failure with "
++			    "err code: %d\n", ret);
++ return -1;
++ }
++
++ ret = dpbp_get_attributes(&avail_dpbp->dpbp, CMD_PRI_LOW,
++ avail_dpbp->token, &dpbp_attr);
++ if (ret != 0) {
++		PMD_DRV_LOG(ERR, "Resource read failure with "
++			    "err code: %d\n", ret);
++ ret = dpbp_disable(&avail_dpbp->dpbp, CMD_PRI_LOW,
++ avail_dpbp->token);
++ return -1;
++ }
++
++ /* Allocate the bp_list which will be added into global_bp_list */
++ bp_list = (struct dpaa2_bp_list *)malloc(sizeof(struct dpaa2_bp_list));
++ if (!bp_list) {
++ PMD_DRV_LOG(ERR, "No heap memory available\n");
++ return -1;
++ }
++
++ /* Set parameters of buffer pool list */
++ bp_list->buf_pool.num_bufs = mp->size;
++ bp_list->buf_pool.size = mp->elt_size
++ - sizeof(struct rte_mbuf) - rte_pktmbuf_priv_size(mp);
++ bp_list->buf_pool.bpid = dpbp_attr.bpid;
++ bp_list->buf_pool.h_bpool_mem = NULL;
++ bp_list->buf_pool.mp = mp;
++ bp_list->buf_pool.dpbp_node = avail_dpbp;
++ bp_list->next = h_bp_list;
++
++ mp->offload_ptr = dpbp_attr.bpid;
++
++ /* Increment the available DPBP */
++ avail_dpbp = avail_dpbp->next;
++
++ bpid_info[dpbp_attr.bpid].size = bp_list->buf_pool.size;
++ bpid_info[dpbp_attr.bpid].meta_data_size = sizeof(struct rte_mbuf)
++ + rte_pktmbuf_priv_size(mp);
++ bpid_info[dpbp_attr.bpid].bp_list = bp_list;
++
++ PMD_DRV_LOG(INFO, "BP List created for bpid =%d\n", dpbp_attr.bpid);
++
++ h_bp_list = bp_list;
++ return 0;
++}
++
++static inline void dpaa2_mbuf_release(uint64_t buf, uint32_t bpid)
++{
++ struct qbman_release_desc releasedesc;
++ struct qbman_swp *swp;
++ int ret;
++
++ if (!thread_io_info.dpio_dev) {
++ ret = dpaa2_affine_qbman_swp();
++ if (ret != 0) {
++ PMD_DRV_LOG(ERR, "Failed to allocate IO portal");
++ return;
++ }
++ }
++ swp = thread_io_info.dpio_dev->sw_portal;
++
++ /* Create a release descriptor required for releasing
++ * buffers into BMAN */
++ qbman_release_desc_clear(&releasedesc);
++ qbman_release_desc_set_bpid(&releasedesc, bpid);
++
++ DPAA2_MODIFY_VADDR_TO_IOVA(buf, uint64_t);
++ do {
++ /* Release buffer into the BMAN */
++ ret = qbman_swp_release(swp, &releasedesc, &buf, 1);
++ } while (ret == -EBUSY);
++ PMD_TX_FREE_LOG(DEBUG, "Released %p address to BMAN\n", buf);
++}
++
++int hw_mbuf_alloc(struct rte_mempool *mp, void **mb)
++{
++ struct qbman_swp *swp;
++ uint16_t bpid;
++ uint64_t buf;
++ int ret;
++ struct rte_mbuf *m;
++
++ if ((mp->offload_ptr > MAX_BPID) ||
++ !(bpid_info[mp->offload_ptr].bp_list)) {
++
++ PMD_DRV_LOG(INFO, "DPAA2 buffer pool not configured\n");
++ return -2;
++ }
++
++ bpid = mp->offload_ptr;
++
++ if (!thread_io_info.dpio_dev) {
++ ret = dpaa2_affine_qbman_swp();
++ if (ret != 0) {
++ PMD_DRV_LOG(ERR, "Failed to allocate IO portal");
++ return -1;
++ }
++ }
++ swp = thread_io_info.dpio_dev->sw_portal;
++
++ do {
++ ret = qbman_swp_acquire(swp, bpid, &buf, 1);
++ } while (ret == -EBUSY);
++ if (ret <= 0) {
++ PMD_DRV_LOG(INFO, "Buffer alloc(bpid %d)fail: err: %x",
++ bpid, ret);
++ return -1;
++ }
++ DPAA2_MODIFY_IOVA_TO_VADDR(buf, uint64_t);
++
++ PMD_DRV_LOG(INFO, "Acquired %p address from BMAN\n", buf);
++ m = (struct rte_mbuf *)DPAA2_INLINE_MBUF_FROM_BUF(buf);
++ RTE_MBUF_ASSERT(rte_mbuf_refcnt_read(m) == 0);
++ rte_mbuf_refcnt_set(m, 1);
++ *mb = m;
++ return 0;
++}
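For reference, a hedged editorial sketch of how a caller would pair the allocator with hw_mbuf_free() (defined just below) on a DPAA2-backed mempool; example_use_hw_mbuf() is an invented name, and mp is assumed to have been set up through hw_mbuf_create_pool().

/* Editorial sketch: take one hardware-backed mbuf, use it, give it back. */
static int example_use_hw_mbuf(struct rte_mempool *mp)
{
	void *obj = NULL;
	struct rte_mbuf *m;

	if (hw_mbuf_alloc(mp, &obj) != 0)
		return -1;	/* pool not configured, or BMAN pool empty */
	m = (struct rte_mbuf *)obj;

	/* ... fill the data area and hand the mbuf to the Tx path ... */

	return hw_mbuf_free(m);	/* buffer goes back to BMAN */
}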
++
++int hw_mbuf_free(void __rte_unused *m)
++{
++ struct rte_mbuf *mb = (struct rte_mbuf *)m;
++ if ((mb->pool->offload_ptr > MAX_BPID) ||
++ !(bpid_info[mb->pool->offload_ptr].bp_list)) {
++
++ PMD_DRV_LOG(INFO, "DPAA2 buffer pool not configured\n");
++ return -1;
++ }
++
++ dpaa2_mbuf_release((uint64_t)DPAA2_BUF_FROM_INLINE_MBUF(m),
++ mb->pool->offload_ptr);
++ return 0;
++}
++
++int hw_mbuf_alloc_bulk(struct rte_mempool *pool,
++ void **obj_table, unsigned count)
++{
++ static int alloc;
++ struct qbman_swp *swp;
++ uint32_t mbuf_size;
++ uint16_t bpid;
++ uint64_t bufs[64];
++ int ret;
++ unsigned i, n = 0;
++ struct rte_mbuf **mt = (struct rte_mbuf **)obj_table;
++
++ //PMD_DRV_LOG(DEBUG, MBUF, "%s/n", __func__);
++ if ((pool->offload_ptr > MAX_BPID) ||
++ !(bpid_info[pool->offload_ptr].bp_list)) {
++
++ printf("\nDPAA2 buffer pool not configured\n");
++ return -2;
++ }
++
++ bpid = pool->offload_ptr;
++
++ if (!thread_io_info.dpio_dev) {
++ ret = dpaa2_affine_qbman_swp();
++ if (ret != 0) {
++ PMD_DRV_LOG(ERR, "Failed to allocate IO portal");
++ return -1;
++ }
++ }
++ swp = thread_io_info.dpio_dev->sw_portal;
++
++ /* if number of buffers requested is less than 7 */
++ if (count < DPAA2_MBUF_MAX_ACQ_REL) {
++ ret = qbman_swp_acquire(swp, bpid, &bufs[n], count);
++ if (ret <= 0){
++ PMD_DRV_LOG(ERR, "Failed to allocate buffers %d", ret);
++ return -1;
++ }
++ n = ret;
++ goto set_buf;
++ }
++
++ while (n < count) {
++ ret = 0;
++ /* Acquire is all-or-nothing, so we drain in 7s,
++ * then in 1s for the remainder. */
++ if ((count - n) > DPAA2_MBUF_MAX_ACQ_REL) {
++ ret = qbman_swp_acquire(swp, bpid, &bufs[n],
++ DPAA2_MBUF_MAX_ACQ_REL);
++ if (ret == DPAA2_MBUF_MAX_ACQ_REL) {
++ n += ret;
++ }
++ }
++ if (ret < DPAA2_MBUF_MAX_ACQ_REL) {
++ ret = qbman_swp_acquire(swp, bpid, &bufs[n], 1);
++ if (ret > 0) {
++ PMD_DRV_LOG(DEBUG, "Drained buffer: %x",
++ bufs[n]);
++ n += ret;
++ }
++ }
++ if (ret < 0) {
++			PMD_DRV_LOG(WARNING, "Buffer acquire failed with "
++				    "err code: %d", ret);
++ break;
++ }
++ }
++ if (ret < 0 || n == 0){
++ PMD_DRV_LOG(ERR, "Failed to allocate buffers %d", ret);
++ return -1;
++ }
++set_buf:
++
++ mbuf_size = sizeof(struct rte_mbuf) + rte_pktmbuf_priv_size(pool);
++
++	for (i = 0; i < n; i++) {
++
++		DPAA2_MODIFY_IOVA_TO_VADDR(bufs[i], uint64_t);
++
++ mt[i] = (struct rte_mbuf *)(bufs[i] - mbuf_size);
++ PMD_DRV_LOG(DEBUG,"Acquired %p address %p from BMAN\n", (void *)bufs[i], (void *)mt[i]);
++ if (!bufs[i] || !mt[i]) {
++			printf("\nError: unexpected NULL buffer %p, %p\n",
++			       (void *)bufs[i], (void *)mt[i]);
++ }
++ }
++
++ alloc +=n;
++ PMD_DRV_LOG(DEBUG, "Total = %d , req = %d done = %d",
++ alloc, count, n);
++ return 0;
++}
++
++int hw_mbuf_free_bulk(struct rte_mempool *pool, void * const *obj_table,
++ unsigned n)
++{
++ unsigned i;
++ struct rte_mbuf *m;
++ //PMD_DRV_LOG(INFO, "%s/n", __func__);
++ if ((pool->offload_ptr > MAX_BPID) ||
++ !(bpid_info[pool->offload_ptr].bp_list)) {
++
++ PMD_DRV_LOG(INFO, "DPAA2 buffer pool not configured\n");
++ return -1;
++ }
++ for (i = 0; i < n; i++) {
++ m = (struct rte_mbuf *)(obj_table[i]);
++ dpaa2_mbuf_release((uint64_t)m->buf_addr, pool->offload_ptr);
++ }
++
++ return 0;
++}
++
++int hw_mbuf_init(
++ struct rte_mempool *mp,
++ void *_m)
++{
++ struct rte_mbuf *m = (struct rte_mbuf *)((unsigned char *)_m + DPAA2_FD_PTA_SIZE +
++ DPAA2_MBUF_HW_ANNOTATION + DPAA2_RES);
++ uint32_t mbuf_size, buf_len, priv_size, head_size;
++ uint32_t bpid;
++
++ if ((mp->offload_ptr > MAX_BPID) ||
++ !(bpid_info[mp->offload_ptr].bp_list)) {
++
++ PMD_DRV_LOG(WARNING, "DPAA2 buffer pool not configured\n");
++ return -1;
++ }
++ /*todo - assuming that h_bp_list will be at top node*/
++ bpid = mp->offload_ptr;
++
++ priv_size = rte_pktmbuf_priv_size(mp);
++ mbuf_size = sizeof(struct rte_mbuf) + priv_size;
++
++ RTE_MBUF_ASSERT(RTE_ALIGN(priv_size, RTE_MBUF_PRIV_ALIGN) == priv_size);
++ RTE_MBUF_ASSERT(mp->elt_size >= mbuf_size);
++
++ memset(_m, 0, mp->elt_size);
++
++ /*update it in global list as well */
++ bpid_info[bpid].meta_data_size = DPAA2_RES;
++
++/* head_size = DPAA2_FD_PTA_SIZE + DPAA2_MBUF_HW_ANNOTATION
++ + RTE_PKTMBUF_HEADROOM;
++ head_size = DPAA2_ALIGN_ROUNDUP(head_size,
++ DPAA2_PACKET_LAYOUT_ALIGN);
++ head_size -= DPAA2_FD_PTA_SIZE + DPAA2_MBUF_HW_ANNOTATION;
++*/
++ head_size = RTE_PKTMBUF_HEADROOM;
++
++ buf_len = rte_pktmbuf_data_room_size(mp)
++ - (DPAA2_FD_PTA_SIZE + DPAA2_MBUF_HW_ANNOTATION + DPAA2_RES /* dummy */);
++
++ RTE_MBUF_ASSERT(buf_len <= UINT16_MAX);
++
++ /* start of buffer is after mbuf structure and priv data */
++ m->priv_size = priv_size;
++ m->buf_addr = (char *)m + mbuf_size ;
++ m->buf_physaddr = rte_mempool_virt2phy(mp, _m) + DPAA2_FD_PTA_SIZE +
++ DPAA2_MBUF_HW_ANNOTATION + DPAA2_RES + mbuf_size;
++ m->buf_len = (uint16_t)buf_len;
++
++ /* keep some headroom between start of buffer and data */
++ m->data_off = RTE_MIN(head_size, (uint16_t)m->buf_len);
++ /* init some constant fields */
++ m->pool = mp;
++ m->nb_segs = 1;
++ m->port = 0xff;
++
++ /* Release the mempool buffer to BMAN */
++ dpaa2_mbuf_release((uint64_t)_m, bpid);
++ return 0;
++}
++
+diff --git a/drivers/net/dpaa2/rte_eth_dpio.c b/drivers/net/dpaa2/rte_eth_dpio.c
+new file mode 100644
+index 0000000..23f0b08
+--- /dev/null
++++ b/drivers/net/dpaa2/rte_eth_dpio.c
+@@ -0,0 +1,339 @@
++/*-
++ * BSD LICENSE
++ *
++ * Copyright(c) 2014 Freescale Semiconductor. All rights reserved.
++ * All rights reserved.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions
++ * are met:
++ *
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in
++ * the documentation and/or other materials provided with the
++ * distribution.
++ * * Neither the name of Freescale Semiconductor nor the names of its
++ * contributors may be used to endorse or promote products derived
++ * from this software without specific prior written permission.
++ *
++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
++ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
++ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
++ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
++ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
++ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
++ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
++ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
++ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
++ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++#include <unistd.h>
++#include <stdio.h>
++#include <sys/types.h>
++#include <string.h>
++#include <stdlib.h>
++#include <fcntl.h>
++#include <errno.h>
++#include <sys/ioctl.h>
++#include <sys/stat.h>
++#include <sys/types.h>
++#include <sys/mman.h>
++#include <sys/vfs.h>
++#include <libgen.h>
++
++#include "rte_pci.h"
++#include "rte_memzone.h"
++#include <rte_malloc.h>
++
++#include "rte_eth_dpaa2_pvt.h"
++#include "fsl_qbman_portal.h"
++#include <fsl_dpio.h>
++
++#include <rte_log.h>
++#include "dpaa2_logs.h"
++
++#define NUM_HOST_CPUS RTE_MAX_LCORE
++
++__thread struct thread_io_info_t thread_io_info;
++
++TAILQ_HEAD(dpio_device_list, dpaa2_dpio_dev);
++static struct dpio_device_list *dpio_dev_list; /*!< DPIO device list */
++static uint32_t io_space_count;
++
++/*Stashing Macros*/
++#define DPAA2_CORE_CLUSTER_BASE 0x04
++#define DPAA2_CORE_CLUSTER_FIRST (DPAA2_CORE_CLUSTER_BASE + 0)
++#define DPAA2_CORE_CLUSTER_SECOND (DPAA2_CORE_CLUSTER_BASE + 1)
++#define DPAA2_CORE_CLUSTER_THIRD (DPAA2_CORE_CLUSTER_BASE + 2)
++#define DPAA2_CORE_CLUSTER_FOURTH (DPAA2_CORE_CLUSTER_BASE + 3)
++
++#define DPAA2_CORE_CLUSTER_GET(sdest, cpu_id) \
++do { \
++ if (cpu_id == 0 || cpu_id == 1) \
++ sdest = DPAA2_CORE_CLUSTER_FIRST; \
++ else if (cpu_id == 2 || cpu_id == 3) \
++ sdest = DPAA2_CORE_CLUSTER_SECOND; \
++ else if (cpu_id == 4 || cpu_id == 5) \
++ sdest = DPAA2_CORE_CLUSTER_THIRD; \
++ else \
++ sdest = DPAA2_CORE_CLUSTER_FOURTH; \
++} while (0)
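Worked example (editorial; the function name is invented): with the two-cores-per-cluster layout assumed by this macro, cores 0-1 map to SDEST 4, cores 2-3 to SDEST 5, cores 4-5 to SDEST 6, and everything else to SDEST 7.

/* Editorial example of the cpu_id -> SDEST mapping above. */
static int example_sdest_for_core(int cpu_id)
{
	int sdest;

	DPAA2_CORE_CLUSTER_GET(sdest, cpu_id);	/* e.g. cpu_id 3 -> 5, cpu_id 9 -> 7 */
	return sdest;
}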
++
++static int
++configure_dpio_qbman_swp(struct dpaa2_dpio_dev *dpio_dev)
++{
++ struct qbman_swp_desc p_des;
++ struct dpio_attr attr;
++
++ dpio_dev->dpio = malloc(sizeof(struct fsl_mc_io));
++ if (!dpio_dev->dpio) {
++ PMD_DRV_LOG(ERR, "Memory allocation failure\n");
++ return -1;
++ }
++
++	PMD_DRV_LOG(INFO, "\t Allocated DPIO[%p]\n", dpio_dev->dpio);
++ dpio_dev->dpio->regs = dpio_dev->mc_portal;
++ if (dpio_open(dpio_dev->dpio, CMD_PRI_LOW, dpio_dev->hw_id,
++ &dpio_dev->token)) {
++ PMD_DRV_LOG(ERR, "Failed to allocate IO space\n");
++ free(dpio_dev->dpio);
++ return -1;
++ }
++
++ if (dpio_enable(dpio_dev->dpio, CMD_PRI_LOW, dpio_dev->token)) {
++ PMD_DRV_LOG(ERR, "Failed to Enable dpio\n");
++ dpio_close(dpio_dev->dpio, CMD_PRI_LOW, dpio_dev->token);
++ free(dpio_dev->dpio);
++ return -1;
++ }
++
++ if (dpio_get_attributes(dpio_dev->dpio, CMD_PRI_LOW,
++ dpio_dev->token, &attr)) {
++ PMD_DRV_LOG(ERR, "DPIO Get attribute failed\n");
++ dpio_disable(dpio_dev->dpio, CMD_PRI_LOW, dpio_dev->token);
++ dpio_close(dpio_dev->dpio, CMD_PRI_LOW, dpio_dev->token);
++ free(dpio_dev->dpio);
++ return -1;
++ }
++
++ PMD_DRV_LOG(INFO, "Qbman Portal ID %d\n", attr.qbman_portal_id);
++ PMD_DRV_LOG(INFO, "Portal CE addr 0x%lX\n", attr.qbman_portal_ce_offset);
++ PMD_DRV_LOG(INFO, "Portal CI addr 0x%lX\n", attr.qbman_portal_ci_offset);
++
++ /* Configure & setup SW portal */
++ p_des.block = NULL;
++ p_des.idx = attr.qbman_portal_id;
++ p_des.cena_bar = (void *)(dpio_dev->qbman_portal_ce_paddr);
++ p_des.cinh_bar = (void *)(dpio_dev->qbman_portal_ci_paddr);
++ p_des.irq = -1;
++ p_des.qman_version = attr.qbman_version;
++
++ PMD_DRV_LOG(INFO, "Portal CE addr 0x%p\n", p_des.cena_bar);
++ PMD_DRV_LOG(INFO, "Portal CI addr 0x%p\n", p_des.cinh_bar);
++
++ dpio_dev->sw_portal = qbman_swp_init(&p_des);
++ if (dpio_dev->sw_portal == NULL) {
++ PMD_DRV_LOG(ERR, " QBMan SW Portal Init failed\n");
++ dpio_close(dpio_dev->dpio, CMD_PRI_LOW, dpio_dev->token);
++ free(dpio_dev->dpio);
++ return -1;
++ }
++
++ PMD_DRV_LOG(INFO, "QBMan SW Portal 0x%p\n", dpio_dev->sw_portal);
++
++ return 0;
++}
++
++int dpaa2_configure_stashing(struct dpaa2_dpio_dev *dpio_dev)
++{
++ int sdest;
++ int cpu_id, ret;
++
++ /* Set the Stashing Destination */
++ cpu_id = rte_lcore_id();
++ if (cpu_id < 0) {
++ cpu_id = rte_get_master_lcore();
++ if (cpu_id < 0) {
++ PMD_DRV_LOG(ERR, "\tGetting CPU Index failed\n");
++ return -1;
++ }
++ }
++
++	/*
++	 * When DPDK runs on a Virtual Machine, the stashing destination is
++	 * set in the hardware with respect to the virtual CPU IDs. As a
++	 * workaround, the environment variable HOST_START_CPU gives the
++	 * offset of the host core on which the Virtual Machine threads start.
++	 */
++ if (getenv("HOST_START_CPU")) {
++ cpu_id +=
++ atoi(getenv("HOST_START_CPU"));
++ cpu_id = cpu_id % NUM_HOST_CPUS;
++ }
++
++ /* Set the STASH Destination depending on Current CPU ID.
++ Valid values of SDEST are 4,5,6,7. Where,
++ CPU 0-1 will have SDEST 4
++ CPU 2-3 will have SDEST 5.....and so on.
++ */
++ DPAA2_CORE_CLUSTER_GET(sdest, cpu_id);
++ PMD_DRV_LOG(INFO, "Portal= %d CPU= %u SDEST= %d\n",
++ dpio_dev->index, cpu_id, sdest);
++
++ ret = dpio_set_stashing_destination(dpio_dev->dpio, CMD_PRI_LOW,
++ dpio_dev->token, sdest);
++ if (ret) {
++ PMD_DRV_LOG(ERR, "%d ERROR in SDEST\n", ret);
++ return -1;
++ }
++
++ return 0;
++}
++
++int
++dpaa2_affine_qbman_swp(void)
++{
++ struct dpaa2_dpio_dev *dpio_dev = NULL;
++ int ret;
++
++ if (thread_io_info.dpio_dev)
++ return 0;
++
++ /* Get DPIO dev handle from list using index */
++ TAILQ_FOREACH(dpio_dev, dpio_dev_list, next) {
++ if (dpio_dev && rte_atomic16_test_and_set(&dpio_dev->ref_count))
++ break;
++ }
++ if (!dpio_dev)
++ return -1;
++
++ /* Populate the thread_io_info structure */
++ thread_io_info.dpio_dev = dpio_dev;
++
++ ret = dpaa2_configure_stashing(dpio_dev);
++ if (ret) {
++ RTE_LOG(ERR, EAL, "dpaa2_configure_stashing failed");
++ }
++ return ret;
++}
++
++int
++dpaa2_affine_qbman_swp_sec(void)
++{
++ struct dpaa2_dpio_dev *dpio_dev = NULL;
++ int ret;
++
++ if (thread_io_info.sec_dpio_dev)
++ return 0;
++
++ /* Get DPIO dev handle from list using index */
++ TAILQ_FOREACH(dpio_dev, dpio_dev_list, next) {
++ if (dpio_dev && rte_atomic16_read(&dpio_dev->ref_count) == 0) {
++ rte_atomic16_inc(&dpio_dev->ref_count);
++ break;
++ }
++ }
++ if (!dpio_dev)
++ return -1;
++
++ /* Populate the thread_io_info structure */
++ thread_io_info.sec_dpio_dev = dpio_dev;
++
++ ret = dpaa2_configure_stashing(dpio_dev);
++ if (ret) {
++ RTE_LOG(ERR, EAL, "dpaa2_configure_stashing failed");
++ }
++ return ret;
++}
++
++int
++dpaa2_create_dpio_device(struct vfio_device *vdev,
++ struct vfio_device_info *obj_info,
++ int object_id)
++{
++ struct dpaa2_dpio_dev *dpio_dev;
++ struct vfio_region_info reg_info = { .argsz = sizeof(reg_info)};
++
++ if (obj_info->num_regions < NUM_DPIO_REGIONS) {
++ PMD_DRV_LOG(ERR, "ERROR, Not sufficient number "
++ "of DPIO regions.\n");
++ return -1;
++ }
++
++ if (!dpio_dev_list) {
++ dpio_dev_list = malloc(sizeof(struct dpio_device_list));
++ if (NULL == dpio_dev_list) {
++ PMD_DRV_LOG(ERR, "Memory allocation failed for DPIO list\n");
++ return -1;
++ }
++
++ /* Initialize the DPIO List */
++ TAILQ_INIT(dpio_dev_list);
++ }
++
++ dpio_dev = malloc(sizeof(struct dpaa2_dpio_dev));
++ if (!dpio_dev) {
++ PMD_DRV_LOG(ERR, "Memory allocation failed for DPIO Device\n");
++ return -1;
++ }
++
++	PMD_DRV_LOG(INFO, "\t Allocated DPIO [%p]\n", dpio_dev);
++ dpio_dev->dpio = NULL;
++ dpio_dev->hw_id = object_id;
++ dpio_dev->vfio_fd = vdev->fd;
++ rte_atomic16_init(&dpio_dev->ref_count);
++ /* Using single portal for all devices */
++ dpio_dev->mc_portal = mcp_ptr_list[MC_PORTAL_INDEX];
++
++ reg_info.index = 0;
++ if (ioctl(dpio_dev->vfio_fd, VFIO_DEVICE_GET_REGION_INFO, &reg_info)) {
++ printf("vfio: error getting region info\n");
++ return -1;
++ }
++
++ PMD_DRV_LOG(INFO, "\t Region Offset = %llx\n", reg_info.offset);
++ PMD_DRV_LOG(INFO, "\t Region Size = %llx\n", reg_info.size);
++ dpio_dev->ce_size = reg_info.size;
++ dpio_dev->qbman_portal_ce_paddr = (uint64_t)mmap(NULL, reg_info.size,
++ PROT_WRITE | PROT_READ, MAP_SHARED,
++ dpio_dev->vfio_fd, reg_info.offset);
++
++	/* Create a mapping for the QBMan cache-enabled area. This is a fix
++	 * for the SMMU fault seen on DQRR stashing transactions. */
++ if (vfio_dmamap_mem_region(dpio_dev->qbman_portal_ce_paddr,
++ reg_info.offset, reg_info.size)) {
++ PMD_DRV_LOG(ERR, "DMAMAP for Portal CE area failed.\n");
++ return -1;
++ }
++
++ reg_info.index = 1;
++ if (ioctl(dpio_dev->vfio_fd, VFIO_DEVICE_GET_REGION_INFO, &reg_info)) {
++ printf("vfio: error getting region info\n");
++ return -1;
++ }
++
++ PMD_DRV_LOG(INFO, "\t Region Offset = %llx\n", reg_info.offset);
++ PMD_DRV_LOG(INFO, "\t Region Size = %llx\n", reg_info.size);
++ dpio_dev->ci_size = reg_info.size;
++ dpio_dev->qbman_portal_ci_paddr = (uint64_t)mmap(NULL, reg_info.size,
++ PROT_WRITE | PROT_READ, MAP_SHARED,
++ dpio_dev->vfio_fd, reg_info.offset);
++
++ if (configure_dpio_qbman_swp(dpio_dev)) {
++ PMD_DRV_LOG(ERR,
++ "Failed in configuring the qbman portal for dpio %d\n",
++ dpio_dev->hw_id);
++ return -1;
++ }
++
++ io_space_count++;
++ dpio_dev->index = io_space_count;
++ TAILQ_INSERT_HEAD(dpio_dev_list, dpio_dev, next);
++
++ return 0;
++}
++
+diff --git a/drivers/net/dpaa2/rte_eth_dpni.c b/drivers/net/dpaa2/rte_eth_dpni.c
+new file mode 100644
+index 0000000..62baf03
+--- /dev/null
++++ b/drivers/net/dpaa2/rte_eth_dpni.c
+@@ -0,0 +1,2230 @@
++/*-
++ * BSD LICENSE
++ *
++ * Copyright (c) 2014 Freescale Semiconductor, Inc. All rights reserved.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions
++ * are met:
++ *
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in
++ * the documentation and/or other materials provided with the
++ * distribution.
++ * * Neither the name of Freescale Semiconductor, Inc nor the names of its
++ * contributors may be used to endorse or promote products derived
++ * from this software without specific prior written permission.
++ *
++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
++ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
++ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
++ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
++ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
++ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
++ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
++ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
++ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
++ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++#include <time.h>
++#include <rte_mbuf.h>
++#include <rte_ethdev.h>
++#include <rte_malloc.h>
++#include <rte_memcpy.h>
++#include <rte_string_fns.h>
++#include <rte_cycles.h>
++#include <rte_kvargs.h>
++#include <rte_dev.h>
++
++#include <net/if.h>
++/* MC header files */
++#include <fsl_dpbp.h>
++#include <fsl_dpni.h>
++#include "rte_eth_dpaa2_pvt.h"
++#include "rte_eth_dpni_annot.h"
++#include "dpaa2_logs.h"
++
++#include <fsl_qbman_portal.h>
++#include <fsl_dpio.h>
++
++#define DPAA2_STASHING
++
++/* tx fd send batching */
++#define QBMAN_MULTI_TX
++
++#define RTE_ETH_DPAA2_SNAPSHOT_LEN 65535
++#define RTE_ETH_DPAA2_SNAPLEN 4096
++#define RTE_ETH_DPAA2_PROMISC 1
++#define RTE_ETH_DPAA2_TIMEOUT -1
++#define ETH_DPAA2_RX_IFACE_ARG "rx_iface"
++#define ETH_DPAA2_TX_IFACE_ARG "tx_iface"
++#define ETH_DPAA2_IFACE_ARG "iface"
++
++static const char *drivername = "DPNI PMD";
++
++#define MAX_TCS DPNI_MAX_TC
++#define MAX_RX_QUEUES 64
++#define MAX_TX_QUEUES 64
++
++/*Maximum number of slots available in TX ring*/
++#define MAX_SLOTS 8
++
++/*Threshold for a queue to *Enter* Congestion state.
++ It is set to 128 frames of size 64 bytes.*/
++#define CONG_ENTER_THRESHOLD 128*64
++
++/*Threshold for a queue to *Exit* Congestion state.
++ It is set to 98 frames of size 64 bytes*/
++#define CONG_EXIT_THRESHOLD 98*64
++
++/*! Maximum number of flow distributions per traffic class */
++#define MAX_DIST_PER_TC 16
++
++/* Size of the input SMMU mapped memory required by MC */
++#define DIST_PARAM_IOVA_SIZE 256
++
++struct dpaa2_queue {
++ void *dev;
++ int32_t eventfd; /*!< Event Fd of this queue */
++ uint32_t fqid; /*!< Unique ID of this queue */
++ uint8_t tc_index; /*!< traffic class identifier */
++	uint16_t flow_id;	/*!< To be used by DPAA2 framework */
++ uint64_t rx_pkts;
++ uint64_t tx_pkts;
++ uint64_t err_pkts;
++ union {
++ struct queue_storage_info_t *q_storage;
++ struct qbman_result *cscn;
++ };
++};
++
++struct dpaa2_dev_priv {
++ void *hw;
++ int32_t hw_id;
++ int32_t qdid;
++ uint16_t token;
++ uint8_t nb_tx_queues;
++ uint8_t nb_rx_queues;
++ void *rx_vq[MAX_RX_QUEUES];
++ void *tx_vq[MAX_TX_QUEUES];
++
++ struct dpaa2_bp_list *bp_list; /**<Attached buffer pool list */
++ uint16_t num_dist_per_tc[MAX_TCS];
++
++ uint8_t max_unicast_filters;
++ uint8_t max_multicast_filters;
++ uint8_t max_vlan_filters;
++ uint8_t num_tc;
++ uint32_t options;
++};
++
++static struct rte_pci_id pci_id_dpaa2_map[] = {
++ {RTE_PCI_DEVICE(FSL_VENDOR_ID, FSL_MC_DPNI_DEVID)},
++};
++
++extern struct bp_info bpid_info[MAX_BPID];
++
++static void dpaa2_print_stats(struct rte_eth_dev *dev)
++{
++ struct dpaa2_dev_priv *priv = dev->data->dev_private;
++ struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
++ uint64_t value;
++
++ dpni_get_counter(dpni, CMD_PRI_LOW, priv->token, DPNI_CNT_ING_FRAME, &value);
++ printf("Rx packets: %ld\n", value);
++ dpni_get_counter(dpni, CMD_PRI_LOW, priv->token, DPNI_CNT_ING_BYTE, &value);
++ printf("Rx bytes: %ld\n", value);
++ dpni_get_counter(dpni, CMD_PRI_LOW, priv->token, DPNI_CNT_ING_MCAST_FRAME, &value);
++ printf("Rx Multicast: %ld\n", value);
++ dpni_get_counter(dpni, CMD_PRI_LOW, priv->token, DPNI_CNT_ING_FRAME_DROP, &value);
++ printf("Rx dropped: %ld\n", value);
++ dpni_get_counter(dpni, CMD_PRI_LOW, priv->token, DPNI_CNT_ING_FRAME_DISCARD, &value);
++ printf("Rx discarded: %ld\n", value);
++ dpni_get_counter(dpni, CMD_PRI_LOW, priv->token, DPNI_CNT_EGR_FRAME, &value);
++ printf("Tx packets: %ld\n", value);
++ dpni_get_counter(dpni, CMD_PRI_LOW, priv->token, DPNI_CNT_EGR_BYTE, &value);
++ printf("Tx bytes: %ld\n", value);
++ dpni_get_counter(dpni, CMD_PRI_LOW, priv->token, DPNI_CNT_EGR_FRAME_DISCARD, &value);
++ printf("Tx dropped: %ld\n", value);
++}
++
++/**
++ * Atomically reads the link status information from global
++ * structure rte_eth_dev.
++ *
++ * @param dev
++ * - Pointer to the structure rte_eth_dev to read from.
++ * - Pointer to the buffer to be saved with the link status.
++ *
++ * @return
++ * - On success, zero.
++ * - On failure, negative value.
++ */
++static inline int
++rte_dpni_dev_atomic_read_link_status(struct rte_eth_dev *dev,
++ struct rte_eth_link *link)
++{
++ struct rte_eth_link *dst = link;
++ struct rte_eth_link *src = &dev->data->dev_link;
++
++ if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
++ *(uint64_t *)src) == 0)
++ return -1;
++
++ return 0;
++}
++
++/**
++ * Atomically writes the link status information into global
++ * structure rte_eth_dev.
++ *
++ * @param dev
++ * - Pointer to the structure rte_eth_dev to read from.
++ * - Pointer to the buffer to be saved with the link status.
++ *
++ * @return
++ * - On success, zero.
++ * - On failure, negative value.
++ */
++static inline int
++rte_dpni_dev_atomic_write_link_status(struct rte_eth_dev *dev,
++ struct rte_eth_link *link)
++{
++ struct rte_eth_link *dst = &dev->data->dev_link;
++ struct rte_eth_link *src = link;
++
++ if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
++ *(uint64_t *)src) == 0)
++ return -1;
++
++ return 0;
++}
++
++static inline void
++dpaa2_eth_parse_packet(struct rte_mbuf *mbuf)
++{
++ uint32_t pkt_type = 0;
++ struct pkt_annotation *annotation = (struct pkt_annotation *)
++ ((uint8_t *)mbuf - (DPAA2_MBUF_HW_ANNOTATION + DPAA2_RES));
++
++ PMD_DRV_LOG(DEBUG, "\n 1 annotation = 0x%x ", annotation->word4);
++
++ if (BIT_ISSET_AT_POS(annotation->word3, L2_ETH_MAC_PRESENT))
++ pkt_type/* mbuf->packet_type */ |= RTE_PTYPE_L2_ETHER;
++
++ if (BIT_ISSET_AT_POS(annotation->word4, L3_IPV4_1_PRESENT))
++ pkt_type/* mbuf->packet_type */ |= RTE_PTYPE_L3_IPV4;
++
++ if (BIT_ISSET_AT_POS(annotation->word4, L3_IPV6_1_PRESENT))
++ pkt_type /* mbuf->packet_type */ |= RTE_PTYPE_L3_IPV6;
++
++ if (BIT_ISSET_AT_POS(annotation->word4, L3_IP_1_OPT_PRESENT))
++ pkt_type/* mbuf->packet_type */ |= RTE_PTYPE_L3_IPV4_EXT;
++
++ if (BIT_ISSET_AT_POS(annotation->word4, L3_PROTO_UDP_PRESENT))
++ pkt_type/* mbuf->packet_type */ |= RTE_PTYPE_L4_UDP;
++
++ if (BIT_ISSET_AT_POS(annotation->word4, L3_PROTO_TCP_PRESENT))
++ pkt_type/* mbuf->packet_type */ |= RTE_PTYPE_L4_TCP;
++
++ if (BIT_ISSET_AT_POS(annotation->word4, L3_PROTO_SCTP_PRESENT))
++ pkt_type/* mbuf->packet_type */ |= RTE_PTYPE_L4_SCTP;
++
++ if (BIT_ISSET_AT_POS(annotation->word4, L3_PROTO_ICMP_PRESENT))
++ pkt_type/* mbuf->packet_type */ |= RTE_PTYPE_L4_ICMP;
++
++ if (BIT_ISSET_AT_POS(annotation->word4, L3_IP_UNKNOWN_PROTOCOL))
++ pkt_type/* mbuf->packet_type */ |= RTE_PTYPE_UNKNOWN;
++
++ mbuf->packet_type = pkt_type;
++}
++
++static inline
++struct rte_mbuf *eth_fd_to_mbuf(const struct qbman_fd *fd)
++{
++ struct rte_mbuf *mbuf = DPAA2_INLINE_MBUF_FROM_BUF(DPAA2_GET_FD_ADDR(fd));
++
++ PMD_DRV_LOG(DEBUG, "\nmbuf %p BMAN buf addr %p",
++ (void *)mbuf, mbuf->buf_addr);
++
++ PMD_DRV_LOG(DEBUG, "\nfdaddr =%lx bpid =%d meta =%d off =%d, len =%d\n",
++ DPAA2_GET_FD_ADDR(fd),
++ DPAA2_GET_FD_BPID(fd),
++ bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size,
++ DPAA2_GET_FD_OFFSET(fd),
++ DPAA2_GET_FD_LEN(fd));
++
++// mbuf->data_off = DPAA2_GET_FD_OFFSET(fd);
++ mbuf->data_len = DPAA2_GET_FD_LEN(fd);
++ mbuf->pkt_len = mbuf->data_len;
++ mbuf->next = NULL;
++ rte_mbuf_refcnt_set(mbuf, 1);
++
++ /* Parse the packet */
++ dpaa2_eth_parse_packet(mbuf);
++
++ mbuf->nb_segs = 1;
++ mbuf->ol_flags = 0;
++
++ return mbuf;
++}
++
++static void __attribute__ ((noinline)) eth_mbuf_to_fd(struct rte_mbuf *mbuf,
++ struct qbman_fd *fd, uint16_t bpid)
++{
++ /*Resetting the buffer pool id and offset field*/
++ fd->simple.bpid_offset = 0;
++
++ DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(DPAA2_BUF_FROM_INLINE_MBUF(mbuf)));
++ DPAA2_SET_FD_LEN(fd, mbuf->data_len);
++ DPAA2_SET_FD_BPID(fd, bpid);
++ DPAA2_SET_FD_OFFSET(fd, DPAA2_FD_PTA_SIZE + DPAA2_MBUF_HW_ANNOTATION +
++ DPAA2_RES /* dummy */+ 128 + mbuf->priv_size + mbuf->data_off);
++ DPAA2_SET_FD_ASAL(fd, DPAA2_ASAL_VAL);
++
++ PMD_DRV_LOG(DEBUG, "\nmbuf %p BMAN buf addr %p",
++ (void *)mbuf, mbuf->buf_addr);
++
++ PMD_DRV_LOG(DEBUG, "\nfdaddr =%lx bpid =%d meta =%d off =%d, len =%d\n",
++ DPAA2_GET_FD_ADDR(fd),
++ DPAA2_GET_FD_BPID(fd),
++ bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size,
++ DPAA2_GET_FD_OFFSET(fd),
++ DPAA2_GET_FD_LEN(fd));
++
++ return;
++}
++
++static int eth_copy_mbuf_to_fd(struct rte_mbuf *mbuf,
++ struct qbman_fd *fd, uint16_t bpid)
++{
++ struct rte_mbuf *m;
++ void *mb = NULL;
++
++ if (hw_mbuf_alloc(bpid_info[bpid].bp_list->buf_pool.mp, &mb)) {
++		PMD_DRV_LOG(WARNING, "Unable to allocate a DPAA2 buffer");
++ rte_pktmbuf_free(mbuf);
++ return -1;
++ }
++ m = (struct rte_mbuf *)mb;
++ memcpy((char *)m->buf_addr + mbuf->data_off,
++ (void *)((char *)mbuf->buf_addr + mbuf->data_off),
++ mbuf->pkt_len);
++
++ /*Resetting the buffer pool id and offset field*/
++ fd->simple.bpid_offset = 0;
++
++ DPAA2_SET_FD_ADDR(fd, m->buf_addr);
++ DPAA2_SET_FD_LEN(fd, mbuf->data_len);
++ DPAA2_SET_FD_BPID(fd, bpid);
++ DPAA2_SET_FD_OFFSET(fd, mbuf->data_off);
++ DPAA2_SET_FD_ASAL(fd, DPAA2_ASAL_VAL);
++
++ PMD_DRV_LOG(DEBUG, "\nmbuf %p BMAN buf addr %p",
++ (void *)mbuf, mbuf->buf_addr);
++
++ PMD_DRV_LOG(DEBUG, "\nfdaddr =%lx bpid =%d meta =%d off =%d, len =%d\n",
++ DPAA2_GET_FD_ADDR(fd),
++ DPAA2_GET_FD_BPID(fd),
++ bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size,
++ DPAA2_GET_FD_OFFSET(fd),
++ DPAA2_GET_FD_LEN(fd));
++ /*free the original packet */
++ rte_pktmbuf_free(mbuf);
++
++ return 0;
++}
++
++static uint16_t
++eth_dpaa2_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
++{
++	/* This function receives frames for a given device and VQ. */
++ struct dpaa2_queue *dpaa2_q = (struct dpaa2_queue *)queue;
++ struct qbman_result *dq_storage;
++ uint32_t fqid = dpaa2_q->fqid;
++ int ret, num_rx = 0;
++ uint8_t is_last = 0, status;
++ struct qbman_swp *swp;
++ const struct qbman_fd *fd;
++ struct qbman_pull_desc pulldesc;
++ struct rte_eth_dev *dev = dpaa2_q->dev;
++
++ if (!thread_io_info.dpio_dev) {
++ ret = dpaa2_affine_qbman_swp();
++ if (ret) {
++ PMD_DRV_LOG(ERR, "Failure in affining portal\n");
++ return 0;
++ }
++ }
++ swp = thread_io_info.dpio_dev->sw_portal;
++ dq_storage = dpaa2_q->q_storage->dq_storage[0];
++
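++	/* Build a volatile dequeue (pull) command requesting up to nb_pkts
++	 * frames from this Rx FQ into the software dequeue storage. */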
++ qbman_pull_desc_clear(&pulldesc);
++ qbman_pull_desc_set_numframes(&pulldesc, nb_pkts);
++ qbman_pull_desc_set_fq(&pulldesc, fqid);
++ /* todo optimization - we can have dq_storage_phys available*/
++ qbman_pull_desc_set_storage(&pulldesc, dq_storage,
++ (dma_addr_t)(DPAA2_VADDR_TO_IOVA(dq_storage)), 1);
++
++ /*Issue a volatile dequeue command. */
++ while (1) {
++ if (qbman_swp_pull(swp, &pulldesc)) {
++ PMD_DRV_LOG(ERR, "VDQ command is not issued."
++ "QBMAN is busy\n");
++ /* Portal was busy, try again */
++ continue;
++ }
++ break;
++ };
++
++	/* Receive packets until the last dequeue entry is found for the
++	 * PULL command issued above.
++	 */
++ while (!is_last) {
++		/* Check whether the previously issued command has completed.
++		 * The SWP also appears to be shared between the Ethernet
++		 * driver and the SEC driver. */
++ while(!qbman_check_command_complete(swp, dq_storage))
++ ;
++ /* Loop until the dq_storage is updated with
++ * new token by QBMAN */
++ while (!qbman_result_has_new_result(swp, dq_storage))
++ ;
++		/* Check whether the last pull command has expired and, if so,
++		 * set the loop-termination condition */
++ if (qbman_result_DQ_is_pull_complete(dq_storage)) {
++ is_last = 1;
++ /* Check for valid frame. */
++ status = (uint8_t)qbman_result_DQ_flags(dq_storage);
++ if (unlikely((status & QBMAN_DQ_STAT_VALIDFRAME) == 0)) {
++ PMD_DRV_LOG(DEBUG, "No frame is delivered\n");
++ continue;
++ }
++ }
++
++ fd = qbman_result_DQ_fd(dq_storage);
++ bufs[num_rx] = eth_fd_to_mbuf(fd);
++ bufs[num_rx]->port = dev->data->port_id;
++
++ num_rx++;
++ dq_storage++;
++ } /* End of Packet Rx loop */
++
++ dpaa2_q->rx_pkts += num_rx;
++
++ PMD_DRV_LOG(INFO, "Ethernet Received %d Packets\n", num_rx);
++ /*Return the total number of packets received to DPAA2 app*/
++ return num_rx;
++}
++
++static uint16_t
++eth_dpaa2_prefetch_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
++{
++	/* This function receives frames for a given device and VQ. */
++ struct dpaa2_queue *dpaa2_q = (struct dpaa2_queue *)queue;
++ struct qbman_result *dq_storage;
++ uint32_t fqid = dpaa2_q->fqid;
++ int ret, i, num_rx = 0;
++ uint8_t is_last = 0, status;
++ struct qbman_swp *swp;
++ const struct qbman_fd *fd[16];
++ struct qbman_pull_desc pulldesc;
++ struct queue_storage_info_t *q_storage = dpaa2_q->q_storage;
++ struct rte_eth_dev *dev = dpaa2_q->dev;
++
++ if(!thread_io_info.dpio_dev) {
++ ret = dpaa2_affine_qbman_swp();
++ if (ret) {
++ PMD_DRV_LOG(ERR, "Failure in affining portal\n");
++ return 0;
++ }
++ }
++ swp = thread_io_info.dpio_dev->sw_portal;
++
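++	/* Two dq_storage buffers are toggled: while the frames delivered by
++	 * the previous pull command are processed, a new pull command is
++	 * issued for the next burst, hiding the dequeue latency. */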
++ if(!q_storage->active_dqs) {
++ q_storage->toggle = 0;
++ dq_storage = q_storage->dq_storage[q_storage->toggle];
++ qbman_pull_desc_clear(&pulldesc);
++ qbman_pull_desc_set_numframes(&pulldesc, nb_pkts);
++ qbman_pull_desc_set_fq(&pulldesc, fqid);
++ qbman_pull_desc_set_storage(&pulldesc, dq_storage,
++ (dma_addr_t)(DPAA2_VADDR_TO_IOVA(dq_storage)), 1);
++ if(thread_io_info.global_active_dqs) {
++ while(!qbman_check_command_complete(swp, thread_io_info.global_active_dqs))
++ ;
++ }
++ while (1) {
++ if (qbman_swp_pull(swp, &pulldesc)) {
++ PMD_DRV_LOG(WARNING, "VDQ command is not issued."
++ "QBMAN is busy\n");
++ /* Portal was busy, try again */
++ continue;
++ }
++ break;
++ }
++ q_storage->active_dqs = dq_storage;
++ thread_io_info.global_active_dqs = dq_storage;
++ }
++ while(!qbman_check_command_complete(swp, thread_io_info.global_active_dqs))
++ ;
++ dq_storage = q_storage->active_dqs;
++ while (!is_last) {
++ /* Loop until the dq_storage is updated with
++ * new token by QBMAN */
++ struct rte_mbuf *mbuf;
++
++ while (!qbman_result_has_new_result(swp, dq_storage))
++ ;
++ rte_prefetch0((void *)((uint64_t)(dq_storage + 1)));
++		/* Check whether the last pull command has expired and, if so,
++		 * set the loop-termination condition */
++ if (qbman_result_DQ_is_pull_complete(dq_storage)) {
++ is_last = 1;
++ /* Check for valid frame. */
++ status = (uint8_t)qbman_result_DQ_flags(dq_storage);
++ if (unlikely((status & QBMAN_DQ_STAT_VALIDFRAME) == 0)) {
++ PMD_DRV_LOG(DEBUG, "No frame is delivered\n");
++ continue;
++ }
++ }
++ fd[num_rx] = qbman_result_DQ_fd(dq_storage);
++ mbuf = DPAA2_INLINE_MBUF_FROM_BUF(DPAA2_GET_FD_ADDR(fd[num_rx]));
++		/* Prefetch the mbuf */
++ rte_prefetch0(mbuf);
++ /* Prefetch Annotation address from where we get parse results */
++ rte_prefetch0((void *)((uint64_t)DPAA2_GET_FD_ADDR(fd[num_rx]) + DPAA2_FD_PTA_SIZE + 16));
++ /*Prefetch Data buffer*/
++ /* rte_prefetch0((void *)((uint64_t)DPAA2_GET_FD_ADDR(fd[num_rx]) + DPAA2_GET_FD_OFFSET(fd[num_rx]))); */
++ dq_storage++;
++ num_rx++;
++
++ } /* End of Packet Rx loop */
++
++ for (i = 0; i < num_rx; i++) {
++ bufs[i] = eth_fd_to_mbuf(fd[i]);
++ bufs[i]->port = dev->data->port_id;
++ }
++
++ q_storage->toggle ^= 1;
++ dq_storage = q_storage->dq_storage[q_storage->toggle];
++ qbman_pull_desc_clear(&pulldesc);
++ qbman_pull_desc_set_numframes(&pulldesc, nb_pkts);
++ qbman_pull_desc_set_fq(&pulldesc, fqid);
++ qbman_pull_desc_set_storage(&pulldesc, dq_storage,
++ (dma_addr_t)(DPAA2_VADDR_TO_IOVA(dq_storage)), 1);
++ /*Issue a volatile dequeue command. */
++
++ while (1) {
++ if (qbman_swp_pull(swp, &pulldesc)) {
++ PMD_DRV_LOG(WARNING, "VDQ command is not issued."
++ "QBMAN is busy\n");
++ continue;
++ }
++ break;
++ }
++ q_storage->active_dqs = dq_storage;
++ thread_io_info.global_active_dqs = dq_storage;
++
++ dpaa2_q->rx_pkts += num_rx;
++
++ PMD_DRV_LOG(INFO, "Ethernet Received %d Packets\n", num_rx);
++ /*Return the total number of packets received to DPAA2 app*/
++ return num_rx;
++}
++
++/*
++ * Callback to handle sending packets through a real NIC.
++ */
++static uint16_t
++eth_dpaa2_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
++{
++	/* Function to transmit frames to the given device and VQ. */
++ uint32_t loop;
++ int32_t ret;
++#ifdef QBMAN_MULTI_TX
++ struct qbman_fd fd_arr[8];
++ uint32_t frames_to_send;
++#else
++ struct qbman_fd fd;
++#endif
++ struct rte_mempool *mp;
++ struct qbman_eq_desc eqdesc;
++ struct dpaa2_queue *dpaa2_q = (struct dpaa2_queue *)queue;
++ struct qbman_swp *swp;
++ uint16_t num_tx = 0;
++ /*todo - need to support multiple buffer pools */
++ uint16_t bpid;
++ struct rte_eth_dev *dev = dpaa2_q->dev;
++ struct dpaa2_dev_priv *priv = dev->data->dev_private;
++
++ if (!thread_io_info.dpio_dev) {
++ ret = dpaa2_affine_qbman_swp();
++ if (ret) {
++ PMD_DRV_LOG(ERR, "Failure in affining portal\n");
++ return 0;
++ }
++ }
++ swp = thread_io_info.dpio_dev->sw_portal;
++
++ /*Prepare enqueue descriptor*/
++ qbman_eq_desc_clear(&eqdesc);
++ qbman_eq_desc_set_no_orp(&eqdesc, DPAA2_EQ_RESP_ERR_FQ);
++ qbman_eq_desc_set_response(&eqdesc, 0, 0);
++ qbman_eq_desc_set_qd(&eqdesc, priv->qdid,
++ dpaa2_q->flow_id, dpaa2_q->tc_index);
++
++ /*Clear the unused FD fields before sending*/
++#ifdef QBMAN_MULTI_TX
++ while(nb_pkts) {
++ /*Check if the queue is congested*/
++ if(qbman_result_is_CSCN(dpaa2_q->cscn))
++ goto skip_tx;
++
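++		/* Send in bursts of at most MAX_SLOTS frames per
++		 * qbman_swp_send_multiple() call (presumably 8, matching
++		 * fd_arr[] above). */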
++ frames_to_send = (nb_pkts >> 3) ? MAX_SLOTS : nb_pkts;
++
++ for(loop = 0; loop < frames_to_send; loop++) {
++ fd_arr[loop].simple.frc = 0;
++ DPAA2_RESET_FD_CTRL((&fd_arr[loop]));
++ DPAA2_SET_FD_FLC((&fd_arr[loop]), NULL);
++ mp = (*bufs)->pool;
++ /* Not a hw_pkt pool allocated frame */
++ if (mp && !(mp->flags & MEMPOOL_F_HW_PKT_POOL)) {
++ printf ("\n non hw offload bufffer ");
++ /* alloc should be from the default buffer pool
++ attached to this interface */
++ bpid = priv->bp_list->buf_pool.bpid;
++ if (eth_copy_mbuf_to_fd(*bufs, &fd_arr[loop], bpid)) {
++ bufs++;
++ continue;
++ }
++ } else {
++ bpid = mp->offload_ptr;
++ eth_mbuf_to_fd(*bufs, &fd_arr[loop], bpid);
++ }
++ bufs++;
++ }
++ loop = 0;
++ while(loop < frames_to_send) {
++ loop += qbman_swp_send_multiple(swp, &eqdesc,
++ &fd_arr[loop], frames_to_send - loop);
++ }
++
++ num_tx += frames_to_send;
++ dpaa2_q->tx_pkts += frames_to_send;
++ nb_pkts -= frames_to_send;
++ }
++#else
++ /*Check if the queue is congested*/
++// if(qbman_result_is_CSCN(dpaa2_q->cscn))
++// goto skip_tx;
++
++ fd.simple.frc = 0;
++ DPAA2_RESET_FD_CTRL((&fd));
++ DPAA2_SET_FD_FLC((&fd), NULL);
++ loop = 0;
++
++ while (loop < nb_pkts) {
++ /*Prepare each packet which is to be sent*/
++ mp = bufs[loop]->pool;
++ /* Not a hw_pkt pool allocated frame */
++ if (mp && !(mp->flags & MEMPOOL_F_HW_PKT_POOL)) {
++ printf ("\n non hw offload bufffer ");
++ /* alloc should be from the default buffer pool
++ attached to this interface */
++ if (priv->bp_list)
++ bpid = priv->bp_list->buf_pool.bpid;
++ else
++ printf("\n ??? why no bpool attached");
++
++ if (eth_copy_mbuf_to_fd(bufs[loop], &fd, bpid)) {
++ loop++;
++ continue;
++ }
++ } else {
++ bpid = mp->offload_ptr;
++ eth_mbuf_to_fd(bufs[loop], &fd, bpid);
++ }
++ /*Enqueue a single packet to the QBMAN*/
++ do {
++ ret = qbman_swp_enqueue(swp, &eqdesc, &fd);
++ if (ret != 0) {
++ PMD_DRV_LOG(DEBUG, "Error in transmiting the frame\n");
++ }
++ } while (ret != 0);
++
++ /* Free the buffer shell */
++ /* rte_pktmbuf_free(bufs[loop]); */
++ num_tx++; loop++;
++ }
++ dpaa2_q->tx_pkts += num_tx;
++ dpaa2_q->err_pkts += nb_pkts - num_tx;
++#endif
++ skip_tx:
++ return num_tx;
++}
++
++static int
++dpaa2_vlan_stripping_set(struct rte_eth_dev *dev, int on)
++{
++ int ret;
++ struct dpaa2_dev_priv *priv = dev->data->dev_private;
++ struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
++
++ PMD_INIT_FUNC_TRACE();
++
++ if (dpni == NULL) {
++ PMD_DRV_LOG(ERR, "dpni is NULL");
++ return -1;
++ }
++
++ ret = dpni_set_vlan_removal(dpni, CMD_PRI_LOW, priv->token, on);
++ if (ret < 0)
++ PMD_DRV_LOG(ERR, "Unable to dpni_set_vlan_removal hwid =%d",
++ priv->hw_id);
++ return ret;
++}
++
++static int
++dpaa2_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
++{
++ int ret;
++ struct dpaa2_dev_priv *priv = dev->data->dev_private;
++ struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
++
++ if (dpni == NULL) {
++ PMD_DRV_LOG(ERR, "dpni is NULL");
++ return -1;
++ }
++
++ if (on)
++ ret = dpni_add_vlan_id(dpni, CMD_PRI_LOW, priv->token, vlan_id);
++ else
++ ret = dpni_remove_vlan_id(dpni, CMD_PRI_LOW, priv->token, vlan_id);
++
++ if (ret < 0)
++ PMD_DRV_LOG(ERR, "ret = %d Unable to add/rem vlan %d hwid =%d",
++ ret, vlan_id, priv->hw_id);
++
++	/* todo: this should be done on a global basis */
++	/* ret = dpni_set_vlan_filters(dpni, CMD_PRI_LOW, priv->token, on);
++	 * if (ret < 0)
++	 *	PMD_DRV_LOG(ERR, "Unable to set vlan filter");
++	 */
++	return ret;
++}
++
++static void
++dpaa2_vlan_offload_set(struct rte_eth_dev *dev, int mask)
++{
++ struct dpaa2_dev_priv *priv = dev->data->dev_private;
++ struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
++ int ret;
++ if (mask & ETH_VLAN_FILTER_MASK) {
++ if (dev->data->dev_conf.rxmode.hw_vlan_filter)
++ ret = dpni_set_vlan_filters(dpni, CMD_PRI_LOW, priv->token, TRUE);
++ else
++ ret = dpni_set_vlan_filters(dpni, CMD_PRI_LOW, priv->token, FALSE);
++ if (ret < 0)
++ PMD_DRV_LOG(ERR, "ret = %d Unable to set vlan filter", ret);
++ }
++
++ if (mask & ETH_VLAN_STRIP_MASK) {
++ /* Enable or disable VLAN stripping */
++ if (dev->data->dev_conf.rxmode.hw_vlan_strip)
++ dpaa2_vlan_stripping_set(dev, TRUE);
++ else
++ dpaa2_vlan_stripping_set(dev, FALSE);
++ }
++
++ if (mask & ETH_VLAN_EXTEND_MASK) {
++ PMD_INIT_FUNC_TRACE();
++		/* if (dev->data->dev_conf.rxmode.hw_vlan_extend)
++		 *	i40e_vsi_config_double_vlan(vsi, TRUE);
++		 * else
++		 *	i40e_vsi_config_double_vlan(vsi, FALSE);
++		 */
++	}
++}
++
++static void
++dpaa2_eth_dev_info(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
++{
++ struct dpaa2_dev_priv *priv = dev->data->dev_private;
++
++ dev_info->driver_name = drivername;
++ dev_info->if_index = priv->hw_id;
++ dev_info->max_mac_addrs = priv->max_unicast_filters;
++ dev_info->max_rx_pktlen = (uint32_t)-1;
++ dev_info->max_rx_queues = (uint16_t)priv->nb_rx_queues;
++ dev_info->max_tx_queues = (uint16_t)priv->nb_tx_queues;
++ dev_info->min_rx_bufsize = 0;
++ dev_info->pci_dev = dev->pci_dev;
++/* dev_info->rx_offload_capa =
++ DEV_RX_OFFLOAD_IPV4_CKSUM |
++ DEV_RX_OFFLOAD_UDP_CKSUM |
++ DEV_RX_OFFLOAD_TCP_CKSUM;
++ dev_info->tx_offload_capa =
++ DEV_TX_OFFLOAD_IPV4_CKSUM |
++ DEV_TX_OFFLOAD_UDP_CKSUM |
++ DEV_TX_OFFLOAD_TCP_CKSUM |
++ DEV_TX_OFFLOAD_SCTP_CKSUM;
++*/
++}
++
++static int
++dpaa2_alloc_rx_tx_queues(struct rte_eth_dev *dev)
++{
++ struct dpaa2_dev_priv *priv = dev->data->dev_private;
++ uint8_t tc_idx;
++ uint16_t dist_idx;
++ uint32_t vq_id;
++ struct dpaa2_queue *mc_q, *mcq;
++ uint32_t tot_queues;
++ int i;
++ struct dpaa2_queue *dpaa2_q;
++ tot_queues = priv->nb_rx_queues + priv->nb_tx_queues;
++ mc_q = rte_malloc(NULL, sizeof(struct dpaa2_queue) * tot_queues,
++ RTE_CACHE_LINE_SIZE);
++ if (!mc_q) {
++ PMD_DRV_LOG(ERR, "malloc failed for rx/tx queues\n");
++ return -1;
++ }
++
++ for (i = 0; i < priv->nb_rx_queues; i++) {
++ mc_q->dev = dev;
++ priv->rx_vq[i] = mc_q++;
++ dpaa2_q = (struct dpaa2_queue *)priv->rx_vq[i];
++ dpaa2_q->q_storage = rte_malloc("dq_storage",
++ sizeof(struct queue_storage_info_t),
++ RTE_CACHE_LINE_SIZE);
++ if(!dpaa2_q->q_storage)
++ goto fail;
++
++ memset(dpaa2_q->q_storage, 0, sizeof(struct queue_storage_info_t));
++ }
++
++ for (i = 0; i < priv->nb_tx_queues; i++) {
++ mc_q->dev = dev;
++ priv->tx_vq[i] = mc_q++;
++ }
++
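++	/* Map each logical Rx queue index onto a (traffic class, flow id)
++	 * pair; flow ids are distributed within each traffic class. */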
++ vq_id = 0;
++ for (tc_idx = 0; tc_idx < priv->num_tc; tc_idx++) {
++ for (dist_idx = 0; dist_idx < priv->num_dist_per_tc[tc_idx]; dist_idx++) {
++ mcq = (struct dpaa2_queue *)priv->rx_vq[vq_id];
++ mcq->tc_index = tc_idx;
++ mcq->flow_id = dist_idx;
++ vq_id++;
++ }
++ }
++
++ return 0;
++fail:
++	i -= 1;
++	while (i >= 0) {
++		dpaa2_q = (struct dpaa2_queue *)priv->rx_vq[i];
++		rte_free(dpaa2_q->q_storage);
++		i--;
++	}
++	return -1;
++}
++
++static void dpaa2_distset_to_dpkg_profile_cfg(
++ uint32_t req_dist_set,
++ struct dpkg_profile_cfg *kg_cfg)
++{
++ uint32_t loop = 0, i = 0, dist_field = 0;
++ int l2_configured = 0, l3_configured = 0;
++ int l4_configured = 0, sctp_configured = 0;
++
++ memset(kg_cfg, 0, sizeof(struct dpkg_profile_cfg));
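++	/* Walk the requested RSS field bitmask bit by bit and translate each
++	 * set bit into key-generation extract entries, configuring each
++	 * protocol layer (L2/L3/L4/SCTP) at most once. */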
++ while (req_dist_set) {
++ if (req_dist_set % 2 != 0) {
++ dist_field = 1U << loop;
++ switch (dist_field) {
++ case ETH_RSS_L2_PAYLOAD:
++
++ if (l2_configured)
++ break;
++ l2_configured = 1;
++
++ kg_cfg->extracts[i].extract.from_hdr.prot =
++ NET_PROT_ETH;
++ kg_cfg->extracts[i].extract.from_hdr.field =
++ NH_FLD_ETH_TYPE;
++ kg_cfg->extracts[i].type = DPKG_EXTRACT_FROM_HDR;
++ kg_cfg->extracts[i].extract.from_hdr.type =
++ DPKG_FULL_FIELD;
++ i++;
++ break;
++
++ case ETH_RSS_IPV4:
++ case ETH_RSS_FRAG_IPV4:
++ case ETH_RSS_NONFRAG_IPV4_OTHER:
++ case ETH_RSS_IPV6:
++ case ETH_RSS_FRAG_IPV6:
++ case ETH_RSS_NONFRAG_IPV6_OTHER:
++ case ETH_RSS_IPV6_EX:
++
++ if (l3_configured)
++ break;
++ l3_configured = 1;
++
++ kg_cfg->extracts[i].extract.from_hdr.prot =
++ NET_PROT_IP;
++ kg_cfg->extracts[i].extract.from_hdr.field =
++ NH_FLD_IP_SRC;
++ kg_cfg->extracts[i].type = DPKG_EXTRACT_FROM_HDR;
++ kg_cfg->extracts[i].extract.from_hdr.type =
++ DPKG_FULL_FIELD;
++ i++;
++
++ kg_cfg->extracts[i].extract.from_hdr.prot =
++ NET_PROT_IP;
++ kg_cfg->extracts[i].extract.from_hdr.field =
++ NH_FLD_IP_DST;
++ kg_cfg->extracts[i].type = DPKG_EXTRACT_FROM_HDR;
++ kg_cfg->extracts[i].extract.from_hdr.type =
++ DPKG_FULL_FIELD;
++ i++;
++
++ kg_cfg->extracts[i].extract.from_hdr.prot =
++ NET_PROT_IP;
++ kg_cfg->extracts[i].extract.from_hdr.field =
++ NH_FLD_IP_PROTO;
++ kg_cfg->extracts[i].type = DPKG_EXTRACT_FROM_HDR;
++ kg_cfg->extracts[i].extract.from_hdr.type =
++ DPKG_FULL_FIELD;
++ kg_cfg->num_extracts++;
++ i++;
++ break;
++
++ case ETH_RSS_NONFRAG_IPV4_TCP:
++ case ETH_RSS_NONFRAG_IPV6_TCP:
++ case ETH_RSS_NONFRAG_IPV4_UDP:
++ case ETH_RSS_NONFRAG_IPV6_UDP:
++
++ if (l4_configured)
++ break;
++ l4_configured = 1;
++
++ kg_cfg->extracts[i].extract.from_hdr.prot =
++ NET_PROT_TCP;
++ kg_cfg->extracts[i].extract.from_hdr.field =
++ NH_FLD_TCP_PORT_SRC;
++ kg_cfg->extracts[i].type = DPKG_EXTRACT_FROM_HDR;
++ kg_cfg->extracts[i].extract.from_hdr.type =
++ DPKG_FULL_FIELD;
++ i++;
++
++ kg_cfg->extracts[i].extract.from_hdr.prot =
++ NET_PROT_TCP;
++ kg_cfg->extracts[i].extract.from_hdr.field =
++					NH_FLD_TCP_PORT_DST;
++ kg_cfg->extracts[i].type = DPKG_EXTRACT_FROM_HDR;
++ kg_cfg->extracts[i].extract.from_hdr.type =
++ DPKG_FULL_FIELD;
++ i++;
++ break;
++
++ case ETH_RSS_NONFRAG_IPV4_SCTP:
++ case ETH_RSS_NONFRAG_IPV6_SCTP:
++
++ if (sctp_configured)
++ break;
++ sctp_configured = 1;
++
++ kg_cfg->extracts[i].extract.from_hdr.prot =
++ NET_PROT_SCTP;
++ kg_cfg->extracts[i].extract.from_hdr.field =
++ NH_FLD_SCTP_PORT_SRC;
++ kg_cfg->extracts[i].type = DPKG_EXTRACT_FROM_HDR;
++ kg_cfg->extracts[i].extract.from_hdr.type =
++ DPKG_FULL_FIELD;
++ i++;
++
++ kg_cfg->extracts[i].extract.from_hdr.prot =
++ NET_PROT_SCTP;
++ kg_cfg->extracts[i].extract.from_hdr.field =
++ NH_FLD_SCTP_PORT_DST;
++ kg_cfg->extracts[i].type = DPKG_EXTRACT_FROM_HDR;
++ kg_cfg->extracts[i].extract.from_hdr.type =
++ DPKG_FULL_FIELD;
++ i++;
++ break;
++
++ default:
++ PMD_DRV_LOG(WARNING, "Bad flow distribution option %x\n", dist_field);
++ }
++ }
++ req_dist_set = req_dist_set >> 1;
++ loop++;
++ }
++ kg_cfg->num_extracts = i;
++}
++
++static int dpaa2_setup_flow_distribution(struct rte_eth_dev *eth_dev,
++ uint32_t req_dist_set)
++{
++ struct dpaa2_dev_priv *priv = eth_dev->data->dev_private;
++ struct fsl_mc_io *dpni = priv->hw;
++ struct dpni_rx_tc_dist_cfg tc_cfg;
++ struct dpkg_profile_cfg kg_cfg;
++ void *p_params;
++ int ret, tc_index = 0;
++
++ p_params = rte_malloc(
++ NULL, DIST_PARAM_IOVA_SIZE, RTE_CACHE_LINE_SIZE);
++ if (!p_params) {
++ PMD_DRV_LOG(ERR, "Memory unavaialble\n");
++ return -ENOMEM;
++ }
++ memset(p_params, 0, DIST_PARAM_IOVA_SIZE);
++ memset(&tc_cfg, 0, sizeof(struct dpni_rx_tc_dist_cfg));
++
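++	/* Build the key-generation profile from the requested RSS fields,
++	 * serialize it into the DMA-able buffer and hand its IOVA to the MC
++	 * firmware via dpni_set_rx_tc_dist(). */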
++ dpaa2_distset_to_dpkg_profile_cfg(req_dist_set, &kg_cfg);
++ tc_cfg.key_cfg_iova = (uint64_t)(DPAA2_VADDR_TO_IOVA(p_params));
++ tc_cfg.dist_size = eth_dev->data->nb_rx_queues;
++ tc_cfg.dist_mode = DPNI_DIST_MODE_HASH;
++
++ ret = dpni_prepare_key_cfg(&kg_cfg, p_params);
++ if (ret) {
++ PMD_DRV_LOG(ERR, "Unable to prepare extract parameters\n");
++ rte_free(p_params);
++ return ret;
++ }
++
++ ret = dpni_set_rx_tc_dist(dpni, CMD_PRI_LOW, priv->token, tc_index,
++ &tc_cfg);
++ rte_free(p_params);
++ if (ret) {
++ PMD_DRV_LOG(ERR, "Setting distribution for Rx failed with"
++ "err code: %d\n", ret);
++ return ret;
++ }
++
++ return 0;
++}
++
++static int
++dpaa2_remove_flow_distribution(struct rte_eth_dev *eth_dev, uint8_t tc_index)
++{
++ struct dpaa2_dev_priv *priv = eth_dev->data->dev_private;
++ struct fsl_mc_io *dpni = priv->hw;
++ struct dpni_rx_tc_dist_cfg tc_cfg;
++ struct dpkg_profile_cfg kg_cfg;
++ void *p_params;
++ int ret;
++
++ p_params = rte_malloc(
++ NULL, DIST_PARAM_IOVA_SIZE, RTE_CACHE_LINE_SIZE);
++ if (!p_params) {
++ PMD_DRV_LOG(ERR, "Memory unavaialble\n");
++ return -ENOMEM;
++ }
++ memset(p_params, 0, DIST_PARAM_IOVA_SIZE);
++ memset(&tc_cfg, 0, sizeof(struct dpni_rx_tc_dist_cfg));
++
++ tc_cfg.key_cfg_iova = (uint64_t)(DPAA2_VADDR_TO_IOVA(p_params));
++ tc_cfg.dist_size = 0;
++ tc_cfg.dist_mode = DPNI_DIST_MODE_NONE;
++
++ ret = dpni_prepare_key_cfg(&kg_cfg, p_params);
++ if (ret) {
++ PMD_DRV_LOG(ERR, "Unable to prepare extract parameters\n");
++ rte_free(p_params);
++ return ret;
++ }
++
++ ret = dpni_set_rx_tc_dist(dpni, CMD_PRI_LOW, priv->token, tc_index,
++ &tc_cfg);
++ rte_free(p_params);
++ if (ret) {
++ PMD_DRV_LOG(ERR, "Setting distribution for Rx failed with"
++ "err code: %d\n", ret);
++ return ret;
++ }
++ return ret;
++}
++
++static int
++dpaa2_alloc_dq_storage(struct queue_storage_info_t *q_storage)
++{
++ int i=0;
++
++ for(i = 0;i < NUM_DQS_PER_QUEUE; i++) {
++ q_storage->dq_storage[i] = rte_malloc(NULL,
++ NUM_MAX_RECV_FRAMES * sizeof(struct qbman_result),
++ RTE_CACHE_LINE_SIZE);
++ if(!q_storage->dq_storage[i])
++ goto fail;
++ /*setting toggle for initial condition*/
++ q_storage->toggle = -1;
++ }
++ return 0;
++fail:
++	i -= 1;
++	while (i >= 0) {
++		rte_free(q_storage->dq_storage[i]);
++		i--;
++	}
++	return -1;
++}
++
++static int
++dpaa2_eth_dev_configure(struct rte_eth_dev *dev)
++{
++ struct rte_eth_dev_data *data = dev->data;
++ struct dpaa2_dev_priv *priv = dev->data->dev_private;
++ struct rte_eth_conf *eth_conf = &data->dev_conf;
++ struct dpaa2_queue *dpaa2_q;
++ int i, ret;
++
++ for (i = 0; i < data->nb_rx_queues; i++) {
++ data->rx_queues[i] = priv->rx_vq[i];
++ dpaa2_q = (struct dpaa2_queue *)data->rx_queues[i];
++ if(dpaa2_alloc_dq_storage(dpaa2_q->q_storage))
++ return -1;
++ }
++
++ for (i = 0; i < data->nb_tx_queues; i++) {
++ data->tx_queues[i] = priv->tx_vq[i];
++ dpaa2_q = (struct dpaa2_queue *)data->tx_queues[i];
++ dpaa2_q->cscn = rte_malloc(NULL, sizeof(struct qbman_result), 16);
++ if(!dpaa2_q->cscn)
++ goto fail_tx_queue;
++ }
++
++ /* Check for correct configuration */
++ if (eth_conf->rxmode.mq_mode != ETH_MQ_RX_RSS &&
++ data->nb_rx_queues > 1) {
++ PMD_DRV_LOG(ERR, "Distribution is not enabled, "
++ "but Rx queues more than 1\n");
++ return -1;
++ }
++
++ if (eth_conf->rxmode.mq_mode == ETH_MQ_RX_RSS) {
++ /* Return in case number of Rx queues is 1 */
++ if (data->nb_rx_queues == 1)
++ return 0;
++ ret = dpaa2_setup_flow_distribution(dev,
++ eth_conf->rx_adv_conf.rss_conf.rss_hf);
++ if (ret) {
++ PMD_DRV_LOG(ERR, "dpaa2_setup_flow_distribution failed\n");
++ return ret;
++ }
++ }
++
++ return 0;
++ fail_tx_queue:
++	i -= 1;
++	while (i >= 0) {
++		dpaa2_q = (struct dpaa2_queue *)data->tx_queues[i];
++		rte_free(dpaa2_q->cscn);
++		i--;
++	}
++	return -1;
++}
++
++static int dpaa2_attach_bp_list(struct dpaa2_dev_priv *priv,
++ void *blist)
++{
++ /* Function to attach a DPNI with a buffer pool list. Buffer pool list
++ * handle is passed in blist.
++ */
++ int32_t retcode;
++ struct fsl_mc_io *dpni = priv->hw;
++ struct dpni_pools_cfg bpool_cfg;
++ struct dpaa2_bp_list *bp_list = (struct dpaa2_bp_list *)blist;
++
++ /*Attach buffer pool to the network interface as described by the user*/
++ bpool_cfg.num_dpbp = 1;
++ bpool_cfg.pools[0].dpbp_id = bp_list->buf_pool.dpbp_node->dpbp_id;
++ bpool_cfg.pools[0].backup_pool = 0;
++ bpool_cfg.pools[0].buffer_size =
++ DPAA2_ALIGN_ROUNDUP(bp_list->buf_pool.size,
++				    DPAA2_PACKET_LAYOUT_ALIGN);
++
++ retcode = dpni_set_pools(dpni, CMD_PRI_LOW, priv->token, &bpool_cfg);
++ if (retcode != 0) {
++ PMD_DRV_LOG(ERR, "Error in attaching the buffer pool list"
++ "bpid = %d Error code = %d\n",
++ bpool_cfg.pools[0].dpbp_id, retcode);
++ return retcode;
++ }
++
++ priv->bp_list = bp_list;
++ return 0;
++}
++
++/* Function to setup RX flow information. It contains traffic class ID,
++ * flow ID, destination configuration etc.
++ */
++static int
++dpaa2_rx_queue_setup(struct rte_eth_dev *dev,
++ uint16_t rx_queue_id,
++ uint16_t nb_rx_desc __rte_unused,
++ unsigned int socket_id __rte_unused,
++ const struct rte_eth_rxconf *rx_conf __rte_unused,
++ struct rte_mempool *mb_pool)
++{
++ struct dpaa2_dev_priv *priv = dev->data->dev_private;
++ struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
++ struct dpaa2_queue *dpaa2_q;
++ struct dpni_queue_cfg cfg;
++ uint8_t tc_id, flow_id;
++ int ret;
++
++ PMD_DRV_LOG(INFO, "\n dev =%p, queue =%d, pool = %p, conf =%p",
++ dev, rx_queue_id, mb_pool, rx_conf);
++
++ if (!priv->bp_list) {
++ if (mb_pool->offload_ptr > MAX_BPID) {
++ printf ("\n ??? ERR - %s not a offloaded buffer pool",
++ __func__);
++ return -1;
++ }
++ ret = dpaa2_attach_bp_list(priv,
++ bpid_info[mb_pool->offload_ptr].bp_list);
++ if (ret)
++ return ret;
++ }
++ dpaa2_q = (struct dpaa2_queue *)dev->data->rx_queues[rx_queue_id];
++
++ /*Get the tc id and flow id from given VQ id*/
++ tc_id = rx_queue_id / MAX_DIST_PER_TC;
++ flow_id = rx_queue_id % MAX_DIST_PER_TC;
++ memset(&cfg, 0, sizeof(struct dpni_queue_cfg));
++
++ cfg.options = cfg.options | DPNI_QUEUE_OPT_USER_CTX;
++
++#ifdef DPAA2_STASHING
++ cfg.options = cfg.options | DPNI_QUEUE_OPT_FLC;
++#endif
++
++ cfg.user_ctx = (uint64_t)(dpaa2_q);
++#ifdef DPAA2_STASHING
++ cfg.flc_cfg.flc_type = DPNI_FLC_STASH;
++ cfg.flc_cfg.frame_data_size = DPNI_STASH_SIZE_64B;
++ /* Enabling Annotation stashing */
++ cfg.options |= DPNI_FLC_STASH_FRAME_ANNOTATION;
++ cfg.flc_cfg.options = DPNI_FLC_STASH_FRAME_ANNOTATION;
++#endif
++
++ cfg.options = cfg.options | DPNI_QUEUE_OPT_TAILDROP_THRESHOLD;
++	cfg.tail_drop_threshold = 2048;	/* ~16 packets */
++
++ ret = dpni_set_rx_flow(dpni, CMD_PRI_LOW, priv->token,
++ tc_id, flow_id, &cfg);
++ if (ret) {
++ PMD_DRV_LOG(ERR, "Error in setting the rx flow: = %d\n", ret);
++ return -1;
++ }
++ return 0;
++}
++
++static int
++dpaa2_tx_queue_setup(struct rte_eth_dev *dev,
++ uint16_t tx_queue_id,
++ uint16_t nb_tx_desc __rte_unused,
++ unsigned int socket_id __rte_unused,
++ const struct rte_eth_txconf *tx_conf __rte_unused)
++{
++ struct dpaa2_dev_priv *priv = dev->data->dev_private;
++ struct dpaa2_queue *dpaa2_q;
++ struct fsl_mc_io *dpni = priv->hw;
++ struct dpni_tx_flow_cfg cfg;
++ struct dpni_tx_conf_cfg tx_conf_cfg;
++#ifdef QBMAN_MULTI_TX
++ struct dpni_congestion_notification_cfg cong_notif_cfg;
++#endif
++ uint32_t tc_idx;
++ uint16_t flow_id = DPNI_NEW_FLOW_ID;
++ int ret;
++
++ PMD_INIT_FUNC_TRACE();
++
++ memset(&cfg, 0, sizeof(struct dpni_tx_flow_cfg));
++ cfg.l3_chksum_gen = 1;
++ cfg.options |= DPNI_TX_FLOW_OPT_L3_CHKSUM_GEN;
++ cfg.l4_chksum_gen = 1;
++ cfg.options = DPNI_TX_FLOW_OPT_L4_CHKSUM_GEN;
++ memset(&tx_conf_cfg, 0, sizeof(struct dpni_tx_conf_cfg));
++ tx_conf_cfg.errors_only = TRUE;
++
++ /*
++ if (action & DPAA2BUF_TX_CONF_REQUIRED) {
++ cfg.options = DPNI_TX_FLOW_OPT_TX_CONF_ERROR;
++ cfg.use_common_tx_conf_queue =
++ ((action & DPAA2BUF_TX_CONF_ERR_ON_COMMON_Q) ?
++ TRUE : FALSE);
++ tx_conf_cfg.errors_only = FALSE;
++ }*/
++
++ if (priv->num_tc == 1)
++ tc_idx = 0;
++ else
++ tc_idx = tx_queue_id;
++
++ ret = dpni_set_tx_flow(dpni, CMD_PRI_LOW, priv->token, &flow_id, &cfg);
++ if (ret) {
++ PMD_DRV_LOG(ERR, "Error in setting the tx flow:"
++ "ErrorCode = %x\n", ret);
++ return -1;
++ }
++ /*Set tx-conf and error configuration*/
++ ret = dpni_set_tx_conf(dpni, CMD_PRI_LOW, priv->token,
++ flow_id, &tx_conf_cfg);
++ if (ret) {
++ PMD_DRV_LOG(ERR, "Error in setting tx conf settings: "
++ "ErrorCode = %x", ret);
++ return -1;
++ }
++
++ if (tx_queue_id == 0) {
++ /*Set tx-conf and error configuration*/
++ ret = dpni_set_tx_conf(dpni, CMD_PRI_LOW, priv->token,
++ DPNI_COMMON_TX_CONF, &tx_conf_cfg);
++ if (ret) {
++ PMD_DRV_LOG(ERR, "Error in setting tx conf settings: "
++ "ErrorCode = %x", ret);
++ return -1;
++ }
++ }
++	/* todo - add queue id support instead of hard-coding the queue id as 0 */
++ dpaa2_q = (struct dpaa2_queue *)dev->data->tx_queues[tx_queue_id];
++ dpaa2_q->tc_index = tc_idx;
++ if (flow_id == DPNI_NEW_FLOW_ID)
++ dpaa2_q->flow_id = 0;
++ else
++ dpaa2_q->flow_id = flow_id;
++
++#ifdef QBMAN_MULTI_TX
++ cong_notif_cfg.units = DPNI_CONGESTION_UNIT_BYTES;
++	/* Notify about congestion when the queue holds roughly 128 frames of
++	 * 64 bytes each. */
++	cong_notif_cfg.threshold_entry = CONG_ENTER_THRESHOLD;
++	/* Notify that the queue is no longer congested when the number of
++	 * frames in the queue drops below this threshold.
++	 * TODO: check whether this value is optimal for performance. */
++ cong_notif_cfg.threshold_exit = CONG_EXIT_THRESHOLD;
++ cong_notif_cfg.message_ctx = 0;
++ cong_notif_cfg.message_iova = (uint64_t)dpaa2_q->cscn;
++ cong_notif_cfg.dest_cfg.dest_type = DPNI_DEST_NONE;
++ cong_notif_cfg.options = DPNI_CONG_OPT_WRITE_MEM_ON_ENTER |
++ DPNI_CONG_OPT_WRITE_MEM_ON_EXIT | DPNI_CONG_OPT_COHERENT_WRITE;
++
++ ret = dpni_set_tx_tc_congestion_notification(dpni, CMD_PRI_LOW,
++ priv->token,
++ tc_idx, &cong_notif_cfg);
++ if(ret) {
++ PMD_DRV_LOG(ERR, "Error in setting tx congestion notification "
++ "settings: ErrorCode = %x", ret);
++ return -1;
++ }
++#endif
++ return 0;
++}
++
++static const uint32_t *
++dpaa2_supported_ptypes_get(struct rte_eth_dev *dev)
++{
++ static const uint32_t ptypes[] = {
++ /*todo -= add more types */
++ RTE_PTYPE_L2_ETHER,
++ RTE_PTYPE_L3_IPV4,
++ RTE_PTYPE_L3_IPV4_EXT,
++ RTE_PTYPE_L3_IPV6,
++ RTE_PTYPE_L3_IPV6_EXT,
++ RTE_PTYPE_L4_TCP,
++ RTE_PTYPE_L4_UDP,
++ RTE_PTYPE_L4_SCTP,
++ RTE_PTYPE_L4_ICMP,
++ RTE_PTYPE_UNKNOWN
++ };
++
++ if (dev->rx_pkt_burst == eth_dpaa2_prefetch_rx ||
++ dev->rx_pkt_burst == eth_dpaa2_rx)
++ return ptypes;
++ return NULL;
++}
++
++static int
++dpaa2_dev_start(struct rte_eth_dev *dev)
++{
++ struct rte_eth_dev_data *data = dev->data;
++ struct dpaa2_dev_priv *priv = dev->data->dev_private;
++ struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
++ struct dpni_queue_attr cfg;
++ uint16_t qdid;
++ struct dpaa2_queue *dpaa2_q;
++ int ret, i, mask = 0;
++
++ PMD_INIT_FUNC_TRACE();
++
++ dev->data->dev_link.link_status = 1;
++
++ ret = dpni_enable(dpni, CMD_PRI_LOW, priv->token);
++ if (ret) {
++ PMD_DRV_LOG(ERR, "Failure %d in enabling dpni %d device\n",
++ ret, priv->hw_id);
++ return ret;
++ }
++
++ ret = dpni_get_qdid(dpni, CMD_PRI_LOW, priv->token, &qdid);
++ if (ret) {
++ PMD_DRV_LOG(ERR, "Error to get qdid:ErrorCode = %d\n", ret);
++ return ret;
++ }
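++	/* Cache the queuing destination ID; the Tx path enqueues against it
++	 * via qbman_eq_desc_set_qd(). */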
++ priv->qdid = qdid;
++
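++	/* Resolve the FQID of every Rx flow so the poll-mode Rx path can
++	 * issue pull commands directly against the frame queue. */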
++ for (i = 0; i < data->nb_rx_queues; i++) {
++ dpaa2_q = (struct dpaa2_queue *)data->rx_queues[i];
++ ret = dpni_get_rx_flow(dpni, CMD_PRI_LOW, priv->token,
++ dpaa2_q->tc_index, dpaa2_q->flow_id, &cfg);
++ if (ret) {
++ PMD_DRV_LOG(ERR, "Error to get flow "
++ "information Error code = %d\n", ret);
++ return ret;
++ }
++ dpaa2_q->fqid = cfg.fqid;
++ }
++ /*
++ * VLAN Offload Settings
++ */
++ if (priv->options & DPNI_OPT_VLAN_FILTER)
++ mask = ETH_VLAN_FILTER_MASK;
++
++ if (priv->options & DPNI_OPT_VLAN_MANIPULATION)
++ mask = ETH_VLAN_STRIP_MASK;
++
++ if (mask)
++ dpaa2_vlan_offload_set(dev, mask);
++
++ return 0;
++}
++
++/*********************************************************************
++ *
++ * This routine disables all traffic on the interface by disabling
++ * the underlying DPNI object.
++ *
++ **********************************************************************/
++static void
++dpaa2_dev_stop(struct rte_eth_dev *dev)
++{
++ struct dpaa2_dev_priv *priv = dev->data->dev_private;
++ struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
++ int ret;
++ struct rte_eth_link link;
++
++ dev->data->dev_link.link_status = 0;
++
++ ret = dpni_disable(dpni, CMD_PRI_LOW, priv->token);
++ if (ret) {
++ PMD_DRV_LOG(ERR, "Failure in disabling dpni %d device\n", priv->hw_id);
++ return;
++ }
++
++ /* clear the recorded link status */
++ memset(&link, 0, sizeof(link));
++ rte_dpni_dev_atomic_write_link_status(dev, &link);
++}
++
++static void
++dpaa2_dev_close(struct rte_eth_dev *dev)
++{
++ struct dpaa2_dev_priv *priv = dev->data->dev_private;
++ struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
++ int ret;
++ struct rte_eth_link link;
++
++	/* This function is the reverse of dpaa2_dev_init. It does the following:
++	 * 1. Detach the DPNI from attached resources, i.e. buffer pools, dpbp_id.
++	 * 2. Close the DPNI device.
++	 * 3. Free the allocated resources.
++	 */
++
++ /* Clean the device first */
++ ret = dpni_reset(dpni, CMD_PRI_LOW, priv->token);
++ if (ret) {
++ PMD_DRV_LOG(ERR, "Failure cleaning dpni device with"
++ "error code %d\n", ret);
++ return;
++ }
++
++ /*Close the device at underlying layer*/
++ ret = dpni_close(dpni, CMD_PRI_LOW, priv->token);
++ if (ret) {
++ PMD_DRV_LOG(ERR, "Failure closing dpni device with"
++ "error code %d\n", ret);
++ return;
++ }
++
++ /*Free the allocated memory for ethernet private data and dpni*/
++ priv->hw = NULL;
++ free(dpni);
++
++ memset(&link, 0, sizeof(link));
++ rte_dpni_dev_atomic_write_link_status(dev, &link);
++}
++
++static void
++dpaa2_dev_promiscuous_enable(
++ struct rte_eth_dev *dev)
++{
++ int ret;
++ struct dpaa2_dev_priv *priv = dev->data->dev_private;
++ struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
++
++ if (dpni == NULL) {
++ PMD_DRV_LOG(ERR, "dpni is NULL");
++ return;
++ }
++
++ ret = dpni_set_unicast_promisc(dpni, CMD_PRI_LOW, priv->token, TRUE);
++ if (ret < 0)
++ PMD_DRV_LOG(ERR, "Unable to enable promiscuous mode");
++ return;
++}
++
++static void
++dpaa2_dev_promiscuous_disable(
++ struct rte_eth_dev *dev)
++{
++ int ret;
++ struct dpaa2_dev_priv *priv = dev->data->dev_private;
++ struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
++
++ if (dpni == NULL) {
++ PMD_DRV_LOG(ERR, "dpni is NULL");
++ return;
++ }
++
++ ret = dpni_set_unicast_promisc(dpni, CMD_PRI_LOW, priv->token, FALSE);
++ if (ret < 0)
++ PMD_DRV_LOG(ERR, "Unable to disable promiscuous mode");
++ return;
++}
++
++static void
++dpaa2_dev_allmulticast_enable(
++ struct rte_eth_dev *dev)
++{
++ int ret;
++ struct dpaa2_dev_priv *priv = dev->data->dev_private;
++ struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
++
++ if (dpni == NULL) {
++ PMD_DRV_LOG(ERR, "dpni is NULL");
++ return;
++ }
++
++ ret = dpni_set_multicast_promisc(dpni, CMD_PRI_LOW, priv->token, true);
++ if (ret < 0)
++ PMD_DRV_LOG(ERR, "Unable to enable promiscuous mode");
++ return;
++}
++
++static void
++dpaa2_dev_allmulticast_disable(struct rte_eth_dev *dev)
++{
++ int ret;
++ struct dpaa2_dev_priv *priv = dev->data->dev_private;
++ struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
++
++ if (dpni == NULL) {
++ PMD_DRV_LOG(ERR, "dpni is NULL");
++ return;
++ }
++
++ ret = dpni_set_multicast_promisc(dpni, CMD_PRI_LOW, priv->token, false);
++ if (ret < 0)
++ PMD_DRV_LOG(ERR, "Unable to enable promiscuous mode");
++ return;
++}
++
++static int dpaa2_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
++{
++ int ret;
++ struct dpaa2_dev_priv *priv = dev->data->dev_private;
++ struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
++ uint32_t frame_size = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
++
++ if (dpni == NULL) {
++ PMD_DRV_LOG(ERR, "dpni is NULL");
++ return -EINVAL;
++ }
++
++ /* check that mtu is within the allowed range */
++
++ if ((mtu < ETHER_MIN_MTU) || (frame_size > ETHER_MAX_JUMBO_FRAME_LEN))
++ return -EINVAL;
++
++ /* Set the Max Rx frame length as 'mtu' +
++ * Maximum Ethernet header length */
++ ret = dpni_set_max_frame_length(dpni, CMD_PRI_LOW, priv->token,
++ mtu + ETH_VLAN_HLEN);
++ if (ret) {
++ PMD_DRV_LOG(ERR, "setting the max frame length failed");
++ return -1;
++ }
++ if (priv->options & DPNI_OPT_IPF) {
++ ret = dpni_set_mtu(dpni, CMD_PRI_LOW, priv->token, mtu);
++ if (ret) {
++ PMD_DRV_LOG(ERR, "Setting the MTU failed");
++ return -1;
++ }
++ }
++
++ PMD_DRV_LOG(INFO, "MTU is configured %d for the device\n", mtu);
++ return 0;
++}
++
++static void
++dpaa2_dev_add_mac_addr(struct rte_eth_dev *dev,
++ struct ether_addr *addr,
++ __rte_unused uint32_t index,
++ __rte_unused uint32_t pool)
++{
++ int ret;
++ struct dpaa2_dev_priv *priv = dev->data->dev_private;
++ struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
++
++ if (dpni == NULL) {
++ PMD_DRV_LOG(ERR, "dpni is NULL");
++ return;
++ }
++
++ ret = dpni_add_mac_addr(dpni, CMD_PRI_LOW,
++ priv->token, addr->addr_bytes);
++ if (ret) {
++ PMD_DRV_LOG(ERR, "Adding the MAC ADDR failed");
++ }
++
++ return;
++}
++
++static void
++dpaa2_dev_remove_mac_addr(struct rte_eth_dev *dev,
++ uint32_t index)
++{
++ int ret;
++ struct dpaa2_dev_priv *priv = dev->data->dev_private;
++ struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
++ struct rte_eth_dev_data *data = dev->data;
++ struct ether_addr *macaddr;
++
++ macaddr = &data->mac_addrs[index];
++
++ if (dpni == NULL) {
++ PMD_DRV_LOG(ERR, "dpni is NULL");
++ return;
++ }
++
++ ret = dpni_remove_mac_addr(dpni, CMD_PRI_LOW,
++ priv->token, macaddr->addr_bytes);
++ if (ret) {
++ PMD_DRV_LOG(ERR, "Removing the MAC ADDR failed");
++ }
++
++ return;
++}
++
++static void
++dpaa2_dev_set_mac_addr(struct rte_eth_dev *dev,
++ struct ether_addr *addr)
++{
++ int ret;
++ struct dpaa2_dev_priv *priv = dev->data->dev_private;
++ struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
++
++ if (dpni == NULL) {
++ PMD_DRV_LOG(ERR, "dpni is NULL");
++ return;
++ }
++
++ ret = dpni_set_primary_mac_addr(dpni, CMD_PRI_LOW,
++ priv->token, addr->addr_bytes);
++
++ if (ret) {
++ PMD_DRV_LOG(ERR, "Setting the MAC ADDR failed");
++ }
++
++ return;
++}
++
++int dpaa2_dev_get_mac_addr(struct rte_eth_dev *dev,
++ struct ether_addr *addr)
++{
++ int ret;
++ struct dpaa2_dev_priv *priv = dev->data->dev_private;
++ struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
++
++ if (dpni == NULL) {
++ PMD_DRV_LOG(ERR, "dpni is NULL");
++ return -EINVAL;
++ }
++
++ ret = dpni_get_primary_mac_addr(dpni, CMD_PRI_LOW,
++ priv->token, addr->addr_bytes);
++
++ if (ret) {
++ PMD_DRV_LOG(ERR, "Getting the MAC ADDR failed");
++ }
++
++ return ret;
++}
++
++/*int dpni_clear_mac_filters(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ int unicast,
++ int multicast)
++
++
++int dpni_set_vlan_insertion(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ int en)
++
++dpni_set_errors_behavior
++
++int dpni_get_l3_chksum_validation(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ int *en)
++
++int dpni_set_l3_chksum_validation(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ int en)
++
++int dpni_get_l4_chksum_validation(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ int *en)
++
++int dpni_set_l4_chksum_validation(struct fsl_mc_io *mc_io,
++ uint32_t cmd_flags,
++ uint16_t token,
++ int en)
++
++*/
++
++static int dpaa2_timestamp_enable(struct rte_eth_dev *dev)
++{
++ struct dpaa2_dev_priv *priv = dev->data->dev_private;
++ struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
++
++ struct dpni_buffer_layout layout;
++ int ret;
++
++ layout.options = DPNI_BUF_LAYOUT_OPT_TIMESTAMP;
++ layout.pass_timestamp = TRUE;
++
++ ret = dpni_set_rx_buffer_layout(dpni, CMD_PRI_LOW, priv->token, &layout);
++ if (ret) {
++ PMD_DRV_LOG(ERR, "Enabling timestamp for Rx failed with"
++ "err code: %d", ret);
++ return ret;
++ }
++
++ ret = dpni_set_tx_buffer_layout(dpni, CMD_PRI_LOW, priv->token, &layout);
++ if (ret) {
++ PMD_DRV_LOG(ERR, "Enabling timestamp failed for Tx with"
++ "err code: %d", ret);
++ return ret;
++ }
++
++ ret = dpni_set_tx_conf_buffer_layout(dpni, CMD_PRI_LOW,
++ priv->token, &layout);
++ if (ret) {
++ PMD_DRV_LOG(ERR, "Enabling timestamp failed for Tx-conf with"
++ "err code: %d", ret);
++ return ret;
++ }
++
++ return 0;
++}
++
++static int dpaa2_timestamp_disable(struct rte_eth_dev *dev)
++{
++ struct dpaa2_dev_priv *priv = dev->data->dev_private;
++ struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
++ struct dpni_buffer_layout layout;
++ int ret;
++
++ layout.options = DPNI_BUF_LAYOUT_OPT_TIMESTAMP;
++ layout.pass_timestamp = FALSE;
++
++ ret = dpni_set_rx_buffer_layout(dpni, CMD_PRI_LOW, priv->token, &layout);
++ if (ret) {
++ PMD_DRV_LOG(ERR, "Disabling timestamp failed for Rx with"
++ "err code: %d", ret);
++ return ret;
++ }
++
++ ret = dpni_set_tx_buffer_layout(dpni, CMD_PRI_LOW, priv->token, &layout);
++ if (ret) {
++ PMD_DRV_LOG(ERR, "Disabling timestamp failed for Tx with"
++ "err code: %d", ret);
++ return ret;
++ }
++
++ ret = dpni_set_tx_conf_buffer_layout(dpni, CMD_PRI_LOW,
++ priv->token, &layout);
++ if (ret) {
++ PMD_DRV_LOG(ERR, "Disabling timestamp failed for Tx-conf with"
++ "err code: %d", ret);
++ return ret;
++ }
++
++ return ret;
++}
++
++/* return 0 means link status changed, -1 means not changed */
++static int
++dpaa2_dev_get_link_info(struct rte_eth_dev *dev,
++ int wait_to_complete __rte_unused)
++{
++ int ret;
++ struct dpaa2_dev_priv *priv = dev->data->dev_private;
++ struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
++ struct rte_eth_link link, old;
++ struct dpni_link_state state = {0};
++
++ if (dpni == NULL) {
++ PMD_DRV_LOG(ERR, "dpni is NULL");
++ return 0;
++ }
++	memset(&link, 0, sizeof(link));
++	memset(&old, 0, sizeof(old));
++ rte_dpni_dev_atomic_read_link_status(dev, &old);
++
++ ret = dpni_get_link_state(dpni, CMD_PRI_LOW, priv->token, &state);
++ if (ret < 0) {
++ PMD_DRV_LOG(ERR, "dpni_get_link_state");
++ return 0;
++ }
++
++ if (state.up == 0) {
++ rte_dpni_dev_atomic_write_link_status(dev, &link);
++ if (state.up == old.link_status)
++ return -1;
++ return 0;
++ }
++ link.link_status = state.up;
++ link.link_speed = state.rate;
++
++ if (state.options & DPNI_LINK_OPT_HALF_DUPLEX)
++ link.link_duplex = ETH_LINK_HALF_DUPLEX;
++ else
++ link.link_duplex = ETH_LINK_FULL_DUPLEX;
++
++ rte_dpni_dev_atomic_write_link_status(dev, &link);
++
++ if (link.link_status == old.link_status)
++ return -1;
++
++ return 0;
++}
++
++static
++void dpaa2_dev_stats_get(struct rte_eth_dev *dev,
++ struct rte_eth_stats *stats)
++{
++ struct dpaa2_dev_priv *priv = dev->data->dev_private;
++ struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
++
++ int32_t retcode;
++ uint64_t value;
++
++ if (dpni == NULL) {
++ PMD_DRV_LOG(ERR, "dpni is NULL");
++ return;
++ }
++
++ if (!stats) {
++ PMD_DRV_LOG(ERR, "stats is NULL");
++ return;
++ }
++
++ retcode = dpni_get_counter(dpni, CMD_PRI_LOW, priv->token,
++ DPNI_CNT_ING_FRAME, &value);
++ if (retcode)
++ goto error;
++ stats->ipackets = value;
++ retcode = dpni_get_counter(dpni, CMD_PRI_LOW, priv->token,
++ DPNI_CNT_ING_BYTE, &value);
++ if (retcode)
++ goto error;
++ stats->ibytes = value;
++ retcode = dpni_get_counter(dpni, CMD_PRI_LOW, priv->token,
++ DPNI_CNT_ING_FRAME_DROP, &value);
++ if (retcode)
++ goto error;
++ stats->ierrors = value;
++ retcode = dpni_get_counter(dpni, CMD_PRI_LOW, priv->token,
++ DPNI_CNT_ING_FRAME_DISCARD, &value);
++ if (retcode)
++ goto error;
++ stats->ierrors = stats->ierrors + value;
++ retcode = dpni_get_counter(dpni, CMD_PRI_LOW, priv->token,
++ DPNI_CNT_EGR_FRAME, &value);
++ if (retcode)
++ goto error;
++ stats->opackets = value;
++	retcode = dpni_get_counter(dpni, CMD_PRI_LOW, priv->token,
++				   DPNI_CNT_EGR_BYTE, &value);
++ if (retcode)
++ goto error;
++ stats->obytes = value;
++ retcode = dpni_get_counter(dpni, CMD_PRI_LOW, priv->token,
++ DPNI_CNT_EGR_FRAME_DISCARD, &value);
++ if (retcode)
++ goto error;
++ stats->oerrors = value;
++
++ return;
++
++error:
++ PMD_DRV_LOG(ERR, "Operation not completed:Error Code = %d\n", retcode);
++ return;
++}
++
++static
++void dpaa2_dev_stats_reset(struct rte_eth_dev *dev)
++{
++ struct dpaa2_dev_priv *priv = dev->data->dev_private;
++ struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
++
++ int32_t retcode;
++
++ if (dpni == NULL) {
++ PMD_DRV_LOG(ERR, "dpni is NULL");
++ return;
++ }
++
++ retcode = dpni_set_counter(dpni, CMD_PRI_LOW, priv->token,
++ DPNI_CNT_ING_FRAME, 0);
++ if (retcode)
++ goto error;
++ retcode = dpni_set_counter(dpni, CMD_PRI_LOW, priv->token,
++ DPNI_CNT_ING_BYTE, 0);
++ if (retcode)
++ goto error;
++ retcode = dpni_set_counter(dpni, CMD_PRI_LOW, priv->token,
++ DPNI_CNT_ING_BCAST_FRAME, 0);
++ if (retcode)
++ goto error;
++ retcode = dpni_set_counter(dpni, CMD_PRI_LOW, priv->token,
++ DPNI_CNT_ING_BCAST_BYTES, 0);
++ if (retcode)
++ goto error;
++ retcode = dpni_set_counter(dpni, CMD_PRI_LOW, priv->token,
++ DPNI_CNT_ING_MCAST_FRAME, 0);
++ if (retcode)
++ goto error;
++ retcode = dpni_set_counter(dpni, CMD_PRI_LOW, priv->token,
++ DPNI_CNT_ING_MCAST_BYTE, 0);
++ if (retcode)
++ goto error;
++ retcode = dpni_set_counter(dpni, CMD_PRI_LOW, priv->token,
++ DPNI_CNT_ING_FRAME_DROP, 0);
++ if (retcode)
++ goto error;
++ retcode = dpni_set_counter(dpni, CMD_PRI_LOW, priv->token,
++ DPNI_CNT_ING_FRAME_DISCARD, 0);
++ if (retcode)
++ goto error;
++ retcode = dpni_set_counter(dpni, CMD_PRI_LOW, priv->token,
++ DPNI_CNT_EGR_FRAME, 0);
++ if (retcode)
++ goto error;
++ retcode = dpni_set_counter(dpni, CMD_PRI_LOW, priv->token,
++ DPNI_CNT_EGR_BYTE, 0);
++ if (retcode)
++ goto error;
++ retcode = dpni_set_counter(dpni, CMD_PRI_LOW, priv->token,
++ DPNI_CNT_EGR_FRAME_DISCARD, 0);
++ if (retcode)
++ goto error;
++
++ return;
++
++error:
++ PMD_DRV_LOG(ERR, "Operation not completed:Error Code = %d\n", retcode);
++ return;
++}
++
++static struct eth_dev_ops ops = {
++ .dev_configure = dpaa2_eth_dev_configure,
++ .dev_start = dpaa2_dev_start,
++ .dev_stop = dpaa2_dev_stop,
++ .dev_close = dpaa2_dev_close,
++ .promiscuous_enable = dpaa2_dev_promiscuous_enable,
++ .promiscuous_disable = dpaa2_dev_promiscuous_disable,
++ .allmulticast_enable = dpaa2_dev_allmulticast_enable,
++ .allmulticast_disable = dpaa2_dev_allmulticast_disable,
++ /* .dev_set_link_up = ixgbe_dev_set_link_up, */
++ /* .dev_set_link_down = ixgbe_dev_set_link_down, */
++ .link_update = dpaa2_dev_get_link_info,
++ .stats_get = dpaa2_dev_stats_get,
++ /* .xstats_get = ixgbe_dev_xstats_get, */
++ .stats_reset = dpaa2_dev_stats_reset,
++ /* .xstats_reset = ixgbe_dev_xstats_reset, */
++ /* .queue_stats_mapping_set = i40e_dev_queue_stats_mapping_set, */
++ .dev_infos_get = dpaa2_eth_dev_info,
++ .dev_supported_ptypes_get = dpaa2_supported_ptypes_get,
++ .mtu_set = dpaa2_dev_mtu_set,
++ .vlan_filter_set = dpaa2_vlan_filter_set,
++/* .vlan_tpid_set = i40e_vlan_tpid_set, */
++ .vlan_offload_set = dpaa2_vlan_offload_set,
++/* .vlan_strip_queue_set = i40e_vlan_strip_queue_set, */
++/* .vlan_pvid_set = i40e_vlan_pvid_set, */
++/* .rx_queue_start = i40e_dev_rx_queue_start, */
++/* .rx_queue_stop = i40e_dev_rx_queue_stop, */
++/* .tx_queue_start = i40e_dev_tx_queue_start, */
++/* .tx_queue_stop = i40e_dev_tx_queue_stop, */
++ .rx_queue_setup = dpaa2_rx_queue_setup,
++/* .rx_queue_intr_enable = i40e_dev_rx_queue_intr_enable, */
++/* .rx_queue_intr_disable = i40e_dev_rx_queue_intr_disable, */
++/* .rx_queue_release = i40e_dev_rx_queue_release, */
++/* .rx_queue_count = i40e_dev_rx_queue_count, */
++ .tx_queue_setup = dpaa2_tx_queue_setup,
++/* .tx_queue_release = i40e_dev_tx_queue_release, */
++/* .dev_led_on = i40e_dev_led_on, */
++/* .dev_led_off = i40e_dev_led_off, */
++/* .flow_ctrl_get = i40e_flow_ctrl_get, */
++/* .flow_ctrl_set = i40e_flow_ctrl_set, */
++/* .priority_flow_ctrl_set = i40e_priority_flow_ctrl_set, */
++ .mac_addr_add = dpaa2_dev_add_mac_addr,
++ .mac_addr_remove = dpaa2_dev_remove_mac_addr,
++/* .reta_update = i40e_dev_rss_reta_update, */
++/* .reta_query = i40e_dev_rss_reta_query, */
++/* .rss_hash_update = i40e_dev_rss_hash_update, */
++/* .rss_hash_conf_get = i40e_dev_rss_hash_conf_get, */
++/* .filter_ctrl = i40e_dev_filter_ctrl, */
++/* .rxq_info_get = i40e_rxq_info_get, */
++/* .txq_info_get = i40e_txq_info_get, */
++/* .mirror_rule_set = i40e_mirror_rule_set, */
++/* .mirror_rule_reset = i40e_mirror_rule_reset, */
++ .timesync_enable = dpaa2_timestamp_enable,
++ .timesync_disable = dpaa2_timestamp_disable,
++/* .timesync_read_rx_timestamp = i40e_timesync_read_rx_timestamp, */
++/* .timesync_read_tx_timestamp = i40e_timesync_read_tx_timestamp, */
++/* .get_dcb_info = i40e_dev_get_dcb_info, */
++/* .timesync_adjust_time = i40e_timesync_adjust_time, */
++/* .timesync_read_time = i40e_timesync_read_time, */
++/* .timesync_write_time = i40e_timesync_write_time, */
++/* .get_reg_length = i40e_get_reg_length, */
++/* .get_reg = i40e_get_regs, */
++/* .get_eeprom_length = i40e_get_eeprom_length, */
++/* .get_eeprom = i40e_get_eeprom, */
++ .mac_addr_set = dpaa2_dev_set_mac_addr,
++};
++
++static int
++dpaa2_dev_init(struct rte_eth_dev *eth_dev)
++{
++ struct rte_eth_dev_data *data = eth_dev->data;
++ struct fsl_mc_io *dpni_dev;
++ struct dpni_attr attr;
++ struct dpaa2_dev_priv *priv = eth_dev->data->dev_private;
++ struct dpni_buffer_layout layout;
++ int i, ret, hw_id = eth_dev->pci_dev->addr.devid;
++ struct dpni_extended_cfg *ext_cfg = NULL;
++ int tot_size;
++
++ PMD_INIT_FUNC_TRACE();
++
++ dpni_dev = (struct fsl_mc_io *)malloc(sizeof(struct fsl_mc_io));
++ if (!dpni_dev) {
++ PMD_DRV_LOG(ERR, "malloc failed for dpni device\n");
++ return -1;
++ }
++
++ dpni_dev->regs = mcp_ptr_list[0];
++ ret = dpni_open(dpni_dev, CMD_PRI_LOW, hw_id, &priv->token);
++ if (ret) {
++ PMD_DRV_LOG(ERR, "Failure in opening dpni@%d device with"
++ "error code %d\n", hw_id, ret);
++ return -1;
++ }
++
++ /* Clean the device first */
++ ret = dpni_reset(dpni_dev, CMD_PRI_LOW, priv->token);
++ if (ret) {
++ PMD_DRV_LOG(ERR, "Failure cleaning dpni@%d device with"
++ "error code %d\n", hw_id, ret);
++ return -1;
++ }
++
++ ext_cfg = (struct dpni_extended_cfg *)rte_malloc(NULL, 256,
++ RTE_CACHE_LINE_SIZE);
++ if (!ext_cfg) {
++ PMD_DRV_LOG(ERR, "No data memory\n");
++ return -1;
++ }
++ attr.ext_cfg_iova = (uint64_t)(DPAA2_VADDR_TO_IOVA(ext_cfg));
++
++ ret = dpni_get_attributes(dpni_dev, CMD_PRI_LOW, priv->token, &attr);
++ if (ret) {
++ PMD_DRV_LOG(ERR, "Failure in getting dpni@%d attribute, "
++ "error code %d\n", hw_id, ret);
++ return -1;
++ }
++
++ priv->num_tc = attr.max_tcs;
++ for (i = 0; i < attr.max_tcs; i++) {
++ priv->num_dist_per_tc[i] = ext_cfg->tc_cfg[i].max_dist;
++ priv->nb_rx_queues += priv->num_dist_per_tc[i];
++ /* todo - currently we only support one TC index in RX side */
++ break;
++ }
++ if (attr.max_tcs == 1)
++ priv->nb_tx_queues = attr.max_senders;
++ else
++ priv->nb_tx_queues = attr.max_tcs;
++ PMD_DRV_LOG(INFO, "num_tc %d\n", priv->num_tc);
++ PMD_DRV_LOG(INFO, "nb_rx_queues %d\n", priv->nb_rx_queues);
++
++ eth_dev->data->nb_rx_queues = priv->nb_rx_queues;
++ eth_dev->data->nb_tx_queues = priv->nb_tx_queues;
++
++ priv->hw = dpni_dev;
++ priv->hw_id = hw_id;
++ priv->options = attr.options;
++
++ priv->max_unicast_filters = attr.max_unicast_filters;
++ priv->max_multicast_filters = attr.max_multicast_filters;
++
++ if (attr.options & DPNI_OPT_VLAN_FILTER)
++ priv->max_vlan_filters = attr.max_vlan_filters;
++ else
++ priv->max_vlan_filters = 0;
++
++ ret = dpaa2_alloc_rx_tx_queues(eth_dev);
++ if (ret) {
++ PMD_DRV_LOG(ERR, "dpaa2_alloc_rx_tx_queuesFailed\n");
++ return -1;
++ }
++
++ /* Allocate memory for storing MAC addresses */
++ eth_dev->data->mac_addrs = rte_zmalloc("dpni",
++ ETHER_ADDR_LEN * attr.max_unicast_filters, 0);
++ if (eth_dev->data->mac_addrs == NULL) {
++ PMD_DRV_LOG(ERR, "Failed to allocate %d bytes needed to "
++ "store MAC addresses",
++ ETHER_ADDR_LEN * attr.max_unicast_filters);
++ return -ENOMEM;
++ }
++
++ ret = dpni_get_primary_mac_addr(dpni_dev, CMD_PRI_LOW,
++ priv->token,
++ (uint8_t *)(data->mac_addrs[0].addr_bytes));
++ if (ret) {
++ PMD_DRV_LOG(ERR, "DPNI get mac address failed:"
++ " Error Code = %d\n", ret);
++ return -1;
++ }
++
++ PMD_DRV_LOG(INFO, "Adding Broadcast Address...\n");
++ memset(data->mac_addrs[1].addr_bytes, 0xff, ETH_ADDR_LEN);
++ ret = dpni_add_mac_addr(dpni_dev, CMD_PRI_LOW,
++ priv->token,
++ (uint8_t *)(data->mac_addrs[1].addr_bytes));
++ if (ret) {
++ PMD_DRV_LOG(ERR, "DPNI set broadcast mac address failed:"
++ " Error Code = %0x\n", ret);
++ return -1;
++ }
++
++ /* ... rx buffer layout ... */
++ /*Check alignment for buffer layouts first*/
++	tot_size = DPAA2_FD_PTA_SIZE + DPAA2_MBUF_HW_ANNOTATION +
++		   DPAA2_RES /* dummy */ + 128 /* rte_mbuf */ +
++		   (128 + DPAA2_RES) /* VLIB */ + RTE_PKTMBUF_HEADROOM;
++ tot_size = DPAA2_ALIGN_ROUNDUP(tot_size,
++ DPAA2_PACKET_LAYOUT_ALIGN);
++
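++	/* data_head_room below is tot_size minus the HW-owned annotation
++	 * area, i.e. the space reserved in front of the frame data for the
++	 * reserved bytes and mbuf/VLIB metadata plus RTE_PKTMBUF_HEADROOM. */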
++ memset(&layout, 0, sizeof(struct dpni_buffer_layout));
++ layout.options = DPNI_BUF_LAYOUT_OPT_FRAME_STATUS |
++ DPNI_BUF_LAYOUT_OPT_PARSER_RESULT |
++ DPNI_BUF_LAYOUT_OPT_DATA_HEAD_ROOM |
++ DPNI_BUF_LAYOUT_OPT_PRIVATE_DATA_SIZE;
++
++ layout.pass_frame_status = 1;
++ layout.data_head_room =
++ (tot_size - (DPAA2_FD_PTA_SIZE + DPAA2_MBUF_HW_ANNOTATION));
++ layout.private_data_size = DPAA2_FD_PTA_SIZE;
++ layout.pass_parser_result = 1;
++
++ ret = dpni_set_rx_buffer_layout(dpni_dev, CMD_PRI_LOW, priv->token,
++ &layout);
++ if (ret) {
++ printf("Err(%d) in setting rx buffer layout\n", ret);
++ return -1;
++ }
++
++ /* ... tx buffer layout ... */
++ memset(&layout, 0, sizeof(struct dpni_buffer_layout));
++ layout.options = DPNI_BUF_LAYOUT_OPT_FRAME_STATUS;
++ layout.pass_frame_status = 1;
++ ret = dpni_set_tx_buffer_layout(dpni_dev, CMD_PRI_LOW, priv->token, &layout);
++ if (ret) {
++ printf("Error (%d) in setting tx buffer layout\n", ret);
++ return -1;
++ }
++
++ /* ... tx-conf and error buffer layout ... */
++ memset(&layout, 0, sizeof(struct dpni_buffer_layout));
++ layout.options = DPNI_BUF_LAYOUT_OPT_FRAME_STATUS;
++ layout.pass_frame_status = 1;
++ ret = dpni_set_tx_conf_buffer_layout(dpni_dev, CMD_PRI_LOW, priv->token, &layout);
++ if (ret) {
++ printf("Error (%d) in setting tx-conf buffer layout\n", ret);
++ return -1;
++ }
++
++ /* TODO - Set the MTU if required */
++
++ eth_dev->dev_ops = &ops;
++ eth_dev->rx_pkt_burst = eth_dpaa2_prefetch_rx;/*eth_dpaa2_rx;*/
++ eth_dev->tx_pkt_burst = eth_dpaa2_tx;
++
++ rte_free(ext_cfg);
++
++ return 0;
++}
++
++static struct eth_driver rte_dpaa2_dpni = {
++ {
++ .name = "rte_dpaa2_dpni",
++ .id_table = pci_id_dpaa2_map,
++ },
++ .eth_dev_init = dpaa2_dev_init,
++ .dev_private_size = sizeof(struct dpaa2_dev_priv),
++};
++
++static int
++rte_pmd_dpaa2_devinit(
++ const char *name __rte_unused,
++ const char *params __rte_unused)
++{
++ printf("Initializing dpaa2_pmd for %s\n", name);
++ rte_eth_driver_register(&rte_dpaa2_dpni);
++
++ return 0;
++}
++
++static struct rte_driver pmd_dpaa2_drv = {
++ .name = "dpaa2_pmd",
++ .type = PMD_PDEV,
++ .init = rte_pmd_dpaa2_devinit,
++};
++
++PMD_REGISTER_DRIVER(pmd_dpaa2_drv);
+diff --git a/drivers/net/dpaa2/rte_eth_dpni_annot.h b/drivers/net/dpaa2/rte_eth_dpni_annot.h
+new file mode 100644
+index 0000000..00fac9b
+--- /dev/null
++++ b/drivers/net/dpaa2/rte_eth_dpni_annot.h
+@@ -0,0 +1,311 @@
++/*-
++ * BSD LICENSE
++ *
++ * Copyright (c) 2014 Freescale Semiconductor, Inc. All rights reserved.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions
++ * are met:
++ *
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in
++ * the documentation and/or other materials provided with the
++ * distribution.
++ * * Neither the name of Freescale Semiconductor, Inc nor the names of its
++ * contributors may be used to endorse or promote products derived
++ * from this software without specific prior written permission.
++ *
++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
++ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
++ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
++ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
++ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
++ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
++ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
++ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
++ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
++ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++/**
++ * @file
++ *
++ * DPNI packet parse results - implementation internal
++ */
++
++#ifndef RTE_ETH_DPNI_ANNOT_H_
++#define RTE_ETH_DPNI_ANNOT_H_
++
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/* Annotation valid bits in FD FRC */
++#define DPAA2_FD_FRC_FASV 0x8000
++#define DPAA2_FD_FRC_FAEADV 0x4000
++#define DPAA2_FD_FRC_FAPRV 0x2000
++#define DPAA2_FD_FRC_FAIADV 0x1000
++#define DPAA2_FD_FRC_FASWOV 0x0800
++#define DPAA2_FD_FRC_FAICFDV 0x0400
++
++/* Annotation bits in FD CTRL */
++#define DPAA2_FD_CTRL_ASAL 0x00020000 /* ASAL = 128 */
++#define DPAA2_FD_CTRL_PTA 0x00800000
++#define DPAA2_FD_CTRL_PTV1 0x00400000
++
++/* Frame annotation status */
++struct dpaa2_fas {
++ uint8_t reserved;
++ uint8_t ppid;
++ __le16 ifpid;
++ __le32 status;
++} __packed;
++
++/**
++ * Internal Packet annotation header
++ */
++struct pkt_annotation {
++ /**< word1: Frame Annotation Status (8 bytes)*/
++ uint64_t word1;
++ /**< word2: Time Stamp (8 bytes)*/
++ uint64_t word2;
++ /**< word3: Next Hdr + FAF Extension + FAF (2 + 2 + 4 bytes)*/
++ uint64_t word3;
++ /**< word4: Frame Annotation Flags-FAF (8 bytes) */
++ uint64_t word4;
++ /**< word5:
++ ShimOffset_1 + ShimOffset_2 + IPPIDOffset + EthOffset +
++ LLC+SNAPOffset + VLANTCIOffset_1 + VLANTCIOffset_n +
++ LastETypeOffset (1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 bytes)
++ */
++ uint64_t word5;
++ /**< word6:
++ PPPoEOffset + MPLSOffset_1 + MPLSOffset_n + ARPorIPOffset_1
++ + IPOffset_norMInEncapO + GREOffset + L4Offset +
++ GTPorESPorIPSecOffset(1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 bytes)
++ */
++ uint64_t word6;
++ /**< word7:
++ RoutingHdrOfset1 + RoutingHdrOfset2 + NxtHdrOffset + IPv6FragOffset +
++ GrossRunningSum + RunningSum(1 + 1 + 1 + 1 + 2 + 2 bytes)
++ */
++ uint64_t word7;
++ /**< word8:
++ ParseErrorcode + Soft Parsing Context (1 + 7 bytes)
++ */
++ uint64_t word8; /**< Layer 4 length */
++};
++
++/**
++ * Internal Macros to get/set Packet annotation header
++ */
++
++/** General macro to define a particular bit position */
++#define BIT_POS(x) ((uint64_t)1 << ((x)))
++/** Set a bit in the variable */
++#define BIT_SET_AT_POS(var, pos) (var |= pos)
++/** Reset the bit in the variable */
++#define BIT_RESET_AT_POS(var, pos) (var &= ~(pos))
++/** Check whether the bit is set in the variable */
++#define BIT_ISSET_AT_POS(var, pos) ((var & pos) ? 1 : 0)
++/**
++ * Macros to define bit positions in word3
++ */
++#define NEXT_HDR(var) ((uint64_t)var & 0xFFFF000000000000)
++#define FAF_EXTN_IPV6_ROUTE_HDR_PRESENT(var) BIT_POS(16)
++#define FAF_EXTN_RESERVED(var) ((uint64_t)var & 0x00007FFF00000000)
++#define FAF_USER_DEFINED_RESERVED(var) ((uint64_t)var & 0x00000000FF000000)
++#define SHIM_SHELL_SOFT_PARSING_ERRROR BIT_POS(23)
++#define PARSING_ERROR BIT_POS(22)
++#define L2_ETH_MAC_PRESENT BIT_POS(21)
++#define L2_ETH_MAC_UNICAST BIT_POS(20)
++#define L2_ETH_MAC_MULTICAST BIT_POS(19)
++#define L2_ETH_MAC_BROADCAST BIT_POS(18)
++#define L2_ETH_FRAME_IS_BPDU BIT_POS(17)
++#define L2_ETH_FCOE_PRESENT BIT_POS(16)
++#define L2_ETH_FIP_PRESENT BIT_POS(15)
++#define L2_ETH_PARSING_ERROR BIT_POS(14)
++#define L2_LLC_SNAP_PRESENT BIT_POS(13)
++#define L2_UNKNOWN_LLC_OUI BIT_POS(12)
++#define L2_LLC_SNAP_ERROR BIT_POS(11)
++#define L2_VLAN_1_PRESENT BIT_POS(10)
++#define L2_VLAN_N_PRESENT BIT_POS(9)
++#define L2_VLAN_CFI_BIT_PRESENT BIT_POS(8)
++#define L2_VLAN_PARSING_ERROR BIT_POS(7)
++#define L2_PPPOE_PPP_PRESENT BIT_POS(6)
++#define L2_PPPOE_PPP_PARSING_ERROR BIT_POS(5)
++#define L2_MPLS_1_PRESENT BIT_POS(4)
++#define L2_MPLS_N_PRESENT BIT_POS(3)
++#define L2_MPLS_PARSING_ERROR BIT_POS(2)
++#define L2_ARP_PRESENT BIT_POS(1)
++#define L2_ARP_PARSING_ERROR BIT_POS(0)
++/**
++ * Macros to define bit positions in word4
++ */
++#define L2_UNKNOWN_PROTOCOL BIT_POS(63)
++#define L2_SOFT_PARSING_ERROR BIT_POS(62)
++#define L3_IPV4_1_PRESENT BIT_POS(61)
++#define L3_IPV4_1_UNICAST BIT_POS(60)
++#define L3_IPV4_1_MULTICAST BIT_POS(59)
++#define L3_IPV4_1_BROADCAST BIT_POS(58)
++#define L3_IPV4_N_PRESENT BIT_POS(57)
++#define L3_IPV4_N_UNICAST BIT_POS(56)
++#define L3_IPV4_N_MULTICAST BIT_POS(55)
++#define L3_IPV4_N_BROADCAST BIT_POS(54)
++#define L3_IPV6_1_PRESENT BIT_POS(53)
++#define L3_IPV6_1_UNICAST BIT_POS(52)
++#define L3_IPV6_1_MULTICAST BIT_POS(51)
++#define L3_IPV6_N_PRESENT BIT_POS(50)
++#define L3_IPV6_N_UNICAST BIT_POS(49)
++#define L3_IPV6_N_MULTICAST BIT_POS(48)
++#define L3_IP_1_OPT_PRESENT BIT_POS(47)
++#define L3_IP_1_UNKNOWN_PROTOCOL BIT_POS(46)
++#define L3_IP_1_MORE_FRAGMENT BIT_POS(45)
++#define L3_IP_1_FIRST_FRAGMENT BIT_POS(44)
++#define L3_IP_1_PARSING_ERROR BIT_POS(43)
++#define L3_IP_N_OPT_PRESENT BIT_POS(42)
++#define L3_IP_N_UNKNOWN_PROTOCOL BIT_POS(41)
++#define L3_IP_N_MORE_FRAGMENT BIT_POS(40)
++#define L3_IP_N_FIRST_FRAGMENT BIT_POS(39)
++#define L3_PROTO_ICMP_PRESENT BIT_POS(38)
++#define L3_PROTO_IGMP_PRESENT BIT_POS(37)
++#define L3_PROTO_ICMPV6_PRESENT BIT_POS(36)
++#define L3_PROTO_UDP_LIGHT_PRESENT BIT_POS(35)
++#define L3_IP_N_PARSING_ERROR BIT_POS(34)
++#define L3_MIN_ENCAP_PRESENT BIT_POS(33)
++#define L3_MIN_ENCAP_SBIT_PRESENT BIT_POS(32)
++#define L3_MIN_ENCAP_PARSING_ERROR BIT_POS(31)
++#define L3_PROTO_GRE_PRESENT BIT_POS(30)
++#define L3_PROTO_GRE_RBIT_PRESENT BIT_POS(29)
++#define L3_PROTO_GRE_PARSING_ERROR BIT_POS(28)
++#define L3_IP_UNKNOWN_PROTOCOL BIT_POS(27)
++#define L3_SOFT_PARSING_ERROR BIT_POS(26)
++#define L3_PROTO_UDP_PRESENT BIT_POS(25)
++#define L3_PROTO_UDP_PARSING_ERROR BIT_POS(24)
++#define L3_PROTO_TCP_PRESENT BIT_POS(23)
++#define L3_PROTO_TCP_OPT_PRESENT BIT_POS(22)
++#define L3_PROTO_TCP_CTRL_BIT_6_TO_11_PRESENT BIT_POS(21)
++#define L3_PROTO_TCP_CTRL_BIT_3_TO_5_PRESENT BIT_POS(20)
++#define L3_PROTO_TCP_PARSING_ERROR BIT_POS(19)
++#define L3_PROTO_IPSEC_PRESENT BIT_POS(18)
++#define L3_PROTO_IPSEC_ESP_PRESENT BIT_POS(17)
++#define L3_PROTO_IPSEC_AH_PRESENT BIT_POS(16)
++#define L3_PROTO_IPSEC_PARSING_ERROR BIT_POS(15)
++#define L3_PROTO_SCTP_PRESENT BIT_POS(14)
++#define L3_PROTO_SCTP_PARSING_ERROR BIT_POS(13)
++#define L3_PROTO_DCCP_PRESENT BIT_POS(12)
++#define L3_PROTO_DCCP_PARSING_ERROR BIT_POS(11)
++#define L4_UNKNOWN_PROTOCOL BIT_POS(10)
++#define L4_SOFT_PARSING_ERROR BIT_POS(9)
++#define L3_PROTO_GTP_PRESENT BIT_POS(8)
++#define L3_PROTO_GTP_PARSING_ERROR BIT_POS(7)
++#define L3_PROTO_ESP_PRESENT BIT_POS(6)
++#define L3_PROTO_ESP_PARSING_ERROR BIT_POS(5)
++#define L3_PROTO_ISCSI_PRESENT BIT_POS(4)
++#define L3_PROTO_CAPWAN__CTRL_PRESENT BIT_POS(3)
++#define L3_PROTO_CAPWAN__DATA_PRESENT BIT_POS(2)
++#define L5_SOFT_PARSING_ERROR BIT_POS(1)
++#define L3_IPV6_ROUTE_HDR_PRESENT BIT_POS(0)
++
++/**
++ * Macros to get values in word5
++ */
++#define SHIM_OFFSET_1(var) ((uint64_t)var & 0xFF00000000000000)
++#define SHIM_OFFSET_2(var) ((uint64_t)var & 0x00FF000000000000)
++#define IP_PID_OFFSET(var) ((uint64_t)var & 0x0000FF0000000000)
++#define ETH_OFFSET(var) ((uint64_t)var & 0x000000FF00000000)
++#define LLC_SNAP_OFFSET(var) ((uint64_t)var & 0x00000000FF000000)
++#define VLAN_TCI_OFFSET_1(var) ((uint64_t)var & 0x0000000000FF0000)
++#define VLAN_TCI_OFFSET_N(var) ((uint64_t)var & 0x000000000000FF00)
++#define LAST_ETYPE_OFFSET(var) ((uint64_t)var & 0x00000000000000FF)
++
++/**
++ * Macros to get values in word6
++ */
++#define PPPOE_OFFSET(var) ((uint64_t)var & 0xFF00000000000000)
++#define MPLS_OFFSET_1(var) ((uint64_t)var & 0x00FF000000000000)
++#define MPLS_OFFSET_N(var) ((uint64_t)var & 0x0000FF0000000000)
++#define ARP_OR_IP_OFFSET_1(var) ((uint64_t)var & 0x000000FF00000000)
++#define IP_N_OR_MIN_ENCAP_OFFSET(var) ((uint64_t)var & 0x00000000FF000000)
++#define GRE_OFFSET(var) ((uint64_t)var & 0x0000000000FF0000)
++#define L4_OFFSET(var) ((uint64_t)var & 0x000000000000FF00)
++#define GTP_OR_ESP_OR_IPSEC_OFFSET(var) ((uint64_t)var & 0x00000000000000FF)
++
++/**
++ * Macros to get values in word7
++ */
++#define IPV6_ROUTING_HDR_OFFSET_1(var) ((uint64_t)var & 0xFF00000000000000)
++#define IPV6_ROUTING_HDR_OFFSET_2(var) ((uint64_t)var & 0x00FF000000000000)
++#define NEXT_HDR_OFFSET(var) ((uint64_t)var & 0x0000FF0000000000)
++#define IPV6_FRAG_OFFSET(var) ((uint64_t)var & 0x000000FF00000000)
++#define GROSS_RUNNING_SUM(var) ((uint64_t)var & 0x00000000FFFF0000)
++#define RUNNING_SUM(var) ((uint64_t)var & 0x000000000000FFFF)
++
++/**
++ * Macros to get values in word8
++ */
++#define PARSE_ERROR_CODE(var) ((uint64_t)var & 0xFF00000000000000)
++#define SOFT_PARSING_CONTEXT(var) ((uint64_t)var & 0x00FFFFFFFFFFFFFF)
++
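++/*
++ * Illustrative sketch only (not used by the driver): how the parse-result
++ * words of struct pkt_annotation are meant to be consumed with the macros
++ * above.  The helper name is hypothetical.
++ */
++static inline int
++dpaa2_annot_example_ipv4_udp_l4_offset(const struct pkt_annotation *annot)
++{
++ /* word4 carries the Frame Annotation Flags (presence bits) */
++ if (BIT_ISSET_AT_POS(annot->word4, L3_IPV4_1_PRESENT) &&
++     BIT_ISSET_AT_POS(annot->word4, L3_PROTO_UDP_PRESENT))
++  /* word6 carries byte offsets; the L4 offset sits in bits 15..8 */
++  return (int)(L4_OFFSET(annot->word6) >> 8);
++ return -1;
++}
++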
++/* Debug frame, otherwise supposed to be discarded */
++#define DPAA2_ETH_FAS_DISC 0x80000000
++/* MACSEC frame */
++#define DPAA2_ETH_FAS_MS 0x40000000
++#define DPAA2_ETH_FAS_PTP 0x08000000
++/* Ethernet multicast frame */
++#define DPAA2_ETH_FAS_MC 0x04000000
++/* Ethernet broadcast frame */
++#define DPAA2_ETH_FAS_BC 0x02000000
++#define DPAA2_ETH_FAS_KSE 0x00040000
++#define DPAA2_ETH_FAS_EOFHE 0x00020000
++#define DPAA2_ETH_FAS_MNLE 0x00010000
++#define DPAA2_ETH_FAS_TIDE 0x00008000
++#define DPAA2_ETH_FAS_PIEE 0x00004000
++/* Frame length error */
++#define DPAA2_ETH_FAS_FLE 0x00002000
++/* Frame physical error */
++#define DPAA2_ETH_FAS_FPE 0x00001000
++#define DPAA2_ETH_FAS_PTE 0x00000080
++#define DPAA2_ETH_FAS_ISP 0x00000040
++#define DPAA2_ETH_FAS_PHE 0x00000020
++#define DPAA2_ETH_FAS_BLE 0x00000010
++/* L3 csum validation performed */
++#define DPAA2_ETH_FAS_L3CV 0x00000008
++/* L3 csum error */
++#define DPAA2_ETH_FAS_L3CE 0x00000004
++/* L4 csum validation performed */
++#define DPAA2_ETH_FAS_L4CV 0x00000002
++/* L4 csum error */
++#define DPAA2_ETH_FAS_L4CE 0x00000001
++
++/* These bits always signal errors */
++#define DPAA2_ETH_RX_ERR_MASK (DPAA2_ETH_FAS_KSE | \
++ DPAA2_ETH_FAS_EOFHE | \
++ DPAA2_ETH_FAS_MNLE | \
++ DPAA2_ETH_FAS_TIDE | \
++ DPAA2_ETH_FAS_PIEE | \
++ DPAA2_ETH_FAS_FLE | \
++ DPAA2_ETH_FAS_FPE | \
++ DPAA2_ETH_FAS_PTE | \
++ DPAA2_ETH_FAS_ISP | \
++ DPAA2_ETH_FAS_PHE | \
++ DPAA2_ETH_FAS_BLE | \
++ DPAA2_ETH_FAS_L3CE | \
++ DPAA2_ETH_FAS_L4CE)
++/* Unsupported features in the ingress */
++#define DPAA2_ETH_RX_UNSUPP_MASK DPAA2_ETH_FAS_MS
++/* Tx errors */
++#define DPAA2_ETH_TXCONF_ERR_MASK (DPAA2_ETH_FAS_KSE | \
++ DPAA2_ETH_FAS_EOFHE | \
++ DPAA2_ETH_FAS_MNLE | \
++ DPAA2_ETH_FAS_TIDE)
++
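++/*
++ * Illustrative sketch only (not part of the driver): typical checks against
++ * the FAS status word on the receive path.  Helper names are hypothetical;
++ * the real Rx handling lives in the PMD, not in this header.
++ */
++static inline int
++dpaa2_fas_example_frame_ok(const struct dpaa2_fas *fas)
++{
++ /* any bit of the Rx error mask means the frame should be dropped */
++ return (fas->status & DPAA2_ETH_RX_ERR_MASK) ? 0 : 1;
++}
++
++static inline int
++dpaa2_fas_example_l3_csum_good(const struct dpaa2_fas *fas)
++{
++ /* L3 checksum was validated by hardware and no error was flagged */
++ return (fas->status & DPAA2_ETH_FAS_L3CV) &&
++  !(fas->status & DPAA2_ETH_FAS_L3CE);
++}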
++
++#ifdef __cplusplus
++}
++#endif
++
++#endif
+diff --git a/drivers/net/dpaa2/rte_pmd_dpaa2_version.map b/drivers/net/dpaa2/rte_pmd_dpaa2_version.map
+new file mode 100644
+index 0000000..349c6e1
+--- /dev/null
++++ b/drivers/net/dpaa2/rte_pmd_dpaa2_version.map
+@@ -0,0 +1,4 @@
++DPDK_16.04 {
++
++ local: *;
++};
+diff --git a/lib/librte_eal/common/eal_private.h b/lib/librte_eal/common/eal_private.h
+index 2342fa1..8f27836 100644
+--- a/lib/librte_eal/common/eal_private.h
++++ b/lib/librte_eal/common/eal_private.h
+@@ -328,4 +328,16 @@ int rte_eal_hugepage_init(void);
+ */
+ int rte_eal_hugepage_attach(void);
+
++#ifdef RTE_LIBRTE_DPAA2_PMD
++/**
++ * Initialize any soc init related functions if any before thread creation
++ */
++int rte_eal_soc_pre_init(void);
++
++/**
++ * Initialize any soc init related functions if any after thread creation
++ */
++int rte_eal_soc_post_init(void);
++#endif
++
+ #endif /* _EAL_PRIVATE_H_ */
+diff --git a/lib/librte_eal/linuxapp/eal/Makefile b/lib/librte_eal/linuxapp/eal/Makefile
+index e109361..abcd02c 100644
+--- a/lib/librte_eal/linuxapp/eal/Makefile
++++ b/lib/librte_eal/linuxapp/eal/Makefile
+@@ -47,6 +47,13 @@ CFLAGS += -I$(RTE_SDK)/lib/librte_eal/common/include
+ CFLAGS += -I$(RTE_SDK)/lib/librte_ring
+ CFLAGS += -I$(RTE_SDK)/lib/librte_mempool
+ CFLAGS += -I$(RTE_SDK)/lib/librte_ivshmem
++ifeq ($(CONFIG_RTE_LIBRTE_DPAA2_PMD),y)
++CFLAGS += -I$(RTE_SDK)/lib/librte_mbuf
++CFLAGS += -I$(RTE_SDK)/drivers/net/dpaa2/qbman/include
++CFLAGS += -I$(RTE_SDK)/drivers/net/dpaa2/qbman/include/drivers
++CFLAGS += -I$(RTE_SDK)/drivers/net/dpaa2/mc
++CFLAGS += -I$(RTE_SDK)/drivers/net/dpaa2/driver
++endif
+ CFLAGS += $(WERROR_FLAGS) -O3
+
+ LDLIBS += -ldl
+@@ -72,6 +79,10 @@ SRCS-$(CONFIG_RTE_EXEC_ENV_LINUXAPP) += eal_lcore.c
+ SRCS-$(CONFIG_RTE_EXEC_ENV_LINUXAPP) += eal_timer.c
+ SRCS-$(CONFIG_RTE_EXEC_ENV_LINUXAPP) += eal_interrupts.c
+ SRCS-$(CONFIG_RTE_EXEC_ENV_LINUXAPP) += eal_alarm.c
++ifeq ($(CONFIG_RTE_LIBRTE_DPAA2_PMD),y)
++SRCS-$(CONFIG_RTE_EXEC_ENV_LINUXAPP) += eal_soc.c
++SRCS-$(CONFIG_RTE_EXEC_ENV_LINUXAPP) += eal_vfio_fsl_mc.c
++endif
+ ifeq ($(CONFIG_RTE_LIBRTE_IVSHMEM),y)
+ SRCS-$(CONFIG_RTE_EXEC_ENV_LINUXAPP) += eal_ivshmem.c
+ endif
+diff --git a/lib/librte_eal/linuxapp/eal/eal.c b/lib/librte_eal/linuxapp/eal/eal.c
+index 8aafd51..b2327c7 100644
+--- a/lib/librte_eal/linuxapp/eal/eal.c
++++ b/lib/librte_eal/linuxapp/eal/eal.c
+@@ -805,6 +805,11 @@ rte_eal_init(int argc, char **argv)
+ if (rte_eal_tailqs_init() < 0)
+ rte_panic("Cannot init tail queues for objects\n");
+
++#ifdef RTE_LIBRTE_DPAA2_PMD
++ if (rte_eal_soc_pre_init() < 0)
++ rte_panic("Cannot pre init soc\n");
++#endif
++
+ #ifdef RTE_LIBRTE_IVSHMEM
+ if (rte_eal_ivshmem_obj_init() < 0)
+ rte_panic("Cannot init IVSHMEM objects\n");
+@@ -874,6 +879,11 @@ rte_eal_init(int argc, char **argv)
+ rte_eal_mp_remote_launch(sync_func, NULL, SKIP_MASTER);
+ rte_eal_mp_wait_lcore();
+
++#ifdef RTE_LIBRTE_DPAA2_PMD
++ if (rte_eal_soc_post_init() < 0)
++ rte_panic("Cannot post init soc\n");
++#endif
++
+ /* Probe & Initialize PCI devices */
+ if (rte_eal_pci_probe())
+ rte_panic("Cannot probe PCI\n");
+diff --git a/lib/librte_eal/linuxapp/eal/eal_soc.c b/lib/librte_eal/linuxapp/eal/eal_soc.c
+new file mode 100644
+index 0000000..32ae172
+--- /dev/null
++++ b/lib/librte_eal/linuxapp/eal/eal_soc.c
+@@ -0,0 +1,84 @@
++/*-
++ * BSD LICENSE
++ *
++ * Copyright(c) 2016 Freescale Semiconductor, Inc. All rights reserved.
++ * All rights reserved.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions
++ * are met:
++ *
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in
++ * the documentation and/or other materials provided with the
++ * distribution.
++ * * Neither the name of Freescale Semiconductor, Inc or the names of its
++ * contributors may be used to endorse or promote products derived
++ * from this software without specific prior written permission.
++ *
++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
++ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
++ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
++ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
++ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
++ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
++ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
++ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
++ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
++ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++#include <unistd.h>
++#include <limits.h>
++#include <string.h>
++#include <dirent.h>
++
++#include <rte_log.h>
++#include <rte_eal.h>
++#include <rte_lcore.h>
++#include <rte_common.h>
++#include <rte_string_fns.h>
++#include <rte_debug.h>
++#include "eal_private.h"
++
++#ifdef RTE_LIBRTE_DPAA2_PMD
++#include "eal_vfio_fsl_mc.h"
++#endif
++
++
++#if (defined RTE_LIBRTE_DPAA_PMD)
++extern int usdpaa_pre_rte_eal_init(void);
++extern int usdpaa_post_rte_eal_init(void);
++#endif
++
++
++/* Perform any SoC-specific initialization needed before thread creation */
++int
++rte_eal_soc_pre_init(void)
++{
++#ifdef RTE_LIBRTE_DPAA2_PMD
++ if (rte_eal_dpaa2_init() < 0)
++ RTE_LOG(WARNING, EAL, "Cannot init FSL_MC SCAN \n");
++#endif
++#if (defined RTE_LIBRTE_DPAA_PMD)
++ if (usdpaa_pre_rte_eal_init())
++ RTE_LOG(WARNING, EAL, "Cannot init FSL_DPAA \n");
++#endif
++ return 0;
++}
++
++/* Perform any SoC-specific initialization needed after thread creation */
++int
++rte_eal_soc_post_init(void)
++{
++#if (defined RTE_LIBRTE_DPAA_PMD)
++ if (usdpaa_post_rte_eal_init()) {
++ RTE_LOG(WARNING, EAL, "dpaa1: usdpaa portal init failed\n");
++ }
++#endif
++ return 0;
++}
++
+diff --git a/lib/librte_eal/linuxapp/eal/eal_vfio_fsl_mc.c b/lib/librte_eal/linuxapp/eal/eal_vfio_fsl_mc.c
+new file mode 100644
+index 0000000..c71d8d6
+--- /dev/null
++++ b/lib/librte_eal/linuxapp/eal/eal_vfio_fsl_mc.c
+@@ -0,0 +1,653 @@
++/*-
++ * BSD LICENSE
++ *
++ * Copyright(c) 2014 Freescale Semiconductor. All rights reserved.
++ * All rights reserved.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions
++ * are met:
++ *
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in
++ * the documentation and/or other materials provided with the
++ * distribution.
++ * * Neither the name of Freescale Semiconductor nor the names of its
++ * contributors may be used to endorse or promote products derived
++ * from this software without specific prior written permission.
++ *
++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
++ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
++ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
++ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
++ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
++ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
++ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
++ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
++ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
++ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++#include <unistd.h>
++#include <stdio.h>
++#include <sys/types.h>
++#include <string.h>
++#include <stdlib.h>
++#include <fcntl.h>
++#include <errno.h>
++#include <sys/ioctl.h>
++#include <sys/stat.h>
++#include <sys/types.h>
++#include <sys/mman.h>
++#include <sys/vfs.h>
++#include <libgen.h>
++#include <dirent.h>
++
++#include "rte_pci.h"
++#include "eal_vfio.h"
++
++#include <rte_log.h>
++
++#include "eal_vfio_fsl_mc.h"
++
++#include "rte_pci_dev_ids.h"
++#include "eal_filesystem.h"
++#include "eal_private.h"
++
++#ifndef VFIO_MAX_GROUPS
++#define VFIO_MAX_GROUPS 64
++#endif
++
++//#define DPAA2_STAGE2_STASHING
++
++/** Pathname of FSL-MC devices directory. */
++#define SYSFS_FSL_MC_DEVICES "/sys/bus/fsl-mc/devices"
++
++/* VFIO groups and containers available within this process */
++static struct vfio_group vfio_groups[VFIO_MAX_GRP];
++static struct vfio_container vfio_containers[VFIO_MAX_CONTAINERS];
++static char *ls2bus_container;
++static int container_device_fd;
++static uint32_t *msi_intr_vaddr;
++void *(*mcp_ptr_list);
++static uint32_t mcp_id;
++
++static int vfio_connect_container(struct vfio_group *vfio_group)
++{
++ struct vfio_container *container;
++ int i, fd, ret;
++
++ /* Try connecting to vfio container already created */
++ for (i = 0; i < VFIO_MAX_CONTAINERS; i++) {
++ container = &vfio_containers[i];
++ if (!ioctl(vfio_group->fd, VFIO_GROUP_SET_CONTAINER, &container->fd)) {
++ RTE_LOG(ERR, EAL, "Container pre-exists with FD[0x%x]"
++ " for this group\n", container->fd);
++ vfio_group->container = container;
++ return 0;
++ }
++ }
++
++ /* Opens main vfio file descriptor which represents the "container" */
++ fd = open("/dev/vfio/vfio", O_RDWR);
++ if (fd < 0) {
++ RTE_LOG(ERR, EAL, "vfio: failed to open /dev/vfio/vfio\n");
++ return -errno;
++ }
++
++ ret = ioctl(fd, VFIO_GET_API_VERSION);
++ if (ret != VFIO_API_VERSION) {
++ RTE_LOG(ERR, EAL, "vfio: supported vfio version: %d, "
++ "reported version: %d", VFIO_API_VERSION, ret);
++ close(fd);
++ return -EINVAL;
++ }
++#ifndef DPAA2_STAGE2_STASHING
++ /* Check whether SMMU (VFIO type1) IOMMU support is present */
++ if (ioctl(fd, VFIO_CHECK_EXTENSION, VFIO_TYPE1_IOMMU)) {
++ /* Connect group to container */
++ ret = ioctl(vfio_group->fd, VFIO_GROUP_SET_CONTAINER, &fd);
++ if (ret) {
++ RTE_LOG(ERR, EAL, "vfio: failed to set group container:\n");
++ close(fd);
++ return -errno;
++ }
++
++ ret = ioctl(fd, VFIO_SET_IOMMU, VFIO_TYPE1_IOMMU);
++ if (ret) {
++ RTE_LOG(ERR, EAL, "vfio: failed to set iommu for container:\n");
++ close(fd);
++ return -errno;
++ }
++ } else {
++ RTE_LOG(ERR, EAL, "vfio error: No supported IOMMU\n");
++ close(fd);
++ return -EINVAL;
++ }
++#else
++ /* Check whether nested (stage 2) SMMU IOMMU support is present */
++ if (ioctl(fd, VFIO_CHECK_EXTENSION, VFIO_TYPE1_NESTING_IOMMU)) {
++ /* Connect group to container */
++ ret = ioctl(vfio_group->fd, VFIO_GROUP_SET_CONTAINER, &fd);
++ if (ret) {
++ RTE_LOG(ERR, EAL, "vfio: failed to set group container:\n");
++ close(fd);
++ return -errno;
++ }
++
++ ret = ioctl(fd, VFIO_SET_IOMMU, VFIO_TYPE1_NESTING_IOMMU);
++ if (ret) {
++ RTE_LOG(ERR, EAL, "vfio: failed to set iommu-2 for container:\n");
++ close(fd);
++ return -errno;
++ }
++ } else {
++ RTE_LOG(ERR, EAL, "vfio error: No supported IOMMU-2\n");
++ close(fd);
++ return -EINVAL;
++ }
++#endif
++ container = NULL;
++ for (i = 0; i < VFIO_MAX_CONTAINERS; i++) {
++ if (vfio_containers[i].used)
++ continue;
++ RTE_LOG(ERR, EAL, "DPAA2-Unused container at index %d\n", i);
++ container = &vfio_containers[i];
++ }
++ if (!container) {
++ RTE_LOG(ERR, EAL, "vfio error: No Free Container Found\n");
++ close(fd);
++ return -ENOMEM;
++ }
++
++ container->used = 1;
++ container->fd = fd;
++ container->group_list[container->index] = vfio_group;
++ vfio_group->container = container;
++ container->index++;
++ return 0;
++}
++
++static int vfio_map_irq_region(struct vfio_group *group)
++{
++ int ret;
++ unsigned long *vaddr = NULL;
++ struct vfio_iommu_type1_dma_map map = {
++ .argsz = sizeof(map),
++ .flags = VFIO_DMA_MAP_FLAG_READ | VFIO_DMA_MAP_FLAG_WRITE,
++ .vaddr = 0x6030000,
++ .iova = 0x6030000,
++ .size = 0x1000,
++ };
++
++ vaddr = (unsigned long *)mmap(NULL, 0x1000, PROT_WRITE |
++ PROT_READ, MAP_SHARED, container_device_fd, 0x6030000);
++ if (vaddr == MAP_FAILED) {
++ RTE_LOG(ERR, EAL, " mapping GITS region (errno = %d)", errno);
++ return -errno;
++ }
++
++ msi_intr_vaddr = (uint32_t *)((char *)(vaddr) + 64);
++ map.vaddr = (unsigned long)vaddr;
++ ret = ioctl(group->container->fd, VFIO_IOMMU_MAP_DMA, &map);
++ if (ret == 0)
++ return 0;
++
++ RTE_LOG(ERR, EAL, "vfio_map_irq_region fails (errno = %d)", errno);
++ return -errno;
++}
++
++int vfio_dmamap_mem_region(uint64_t vaddr,
++ uint64_t iova,
++ uint64_t size)
++{
++ struct vfio_group *group;
++ struct vfio_iommu_type1_dma_map dma_map = {
++ .argsz = sizeof(dma_map),
++ .flags = VFIO_DMA_MAP_FLAG_READ | VFIO_DMA_MAP_FLAG_WRITE,
++ };
++
++ dma_map.vaddr = vaddr;
++ dma_map.size = size;
++ dma_map.iova = iova;
++
++ /* SET DMA MAP for IOMMU */
++ group = &vfio_groups[0];
++ if (ioctl(group->container->fd, VFIO_IOMMU_MAP_DMA, &dma_map)) {
++ RTE_LOG(ERR, EAL, "VFIO_IOMMU_MAP_DMA API error %d\n", errno);
++ return -1;
++ }
++ return 0;
++}
++
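++/*
++ * Map every EAL memory segment into the container's IOMMU so the MC
++ * hardware can DMA to and from mempool memory.  The IOVA is the segment's
++ * physical address when RTE_LIBRTE_DPAA2_USE_PHYS_IOVA is set, otherwise
++ * the virtual address is reused 1:1.
++ */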
++static int32_t setup_dmamap(void)
++{
++ int ret;
++ struct vfio_group *group;
++ struct vfio_iommu_type1_dma_map dma_map = {
++ .argsz = sizeof(struct vfio_iommu_type1_dma_map),
++ .flags = VFIO_DMA_MAP_FLAG_READ | VFIO_DMA_MAP_FLAG_WRITE,
++ };
++
++ int i;
++ const struct rte_memseg *memseg;
++
++ for (i = 0; i < RTE_MAX_MEMSEG; i++) {
++ memseg = rte_eal_get_physmem_layout();
++ if (memseg == NULL) {
++ RTE_LOG(ERR, EAL,
++ "\nError Cannot get physical layout\n");
++ return -ENODEV;
++ }
++
++ if (memseg[i].addr == NULL && memseg[i].len == 0) {
++ break;
++ }
++
++ dma_map.size = memseg[i].len;
++ dma_map.vaddr = memseg[i].addr_64;
++#ifdef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA
++ dma_map.iova = memseg[i].phys_addr;
++#else
++ dma_map.iova = dma_map.vaddr;
++#endif
++
++ /* SET DMA MAP for IOMMU */
++ group = &vfio_groups[0];
++
++ printf("-->Initial SHM Virtual ADDR %llX\n", dma_map.vaddr);
++ printf("-----> DMA size 0x%llX\n", dma_map.size);
++ ret = ioctl(group->container->fd, VFIO_IOMMU_MAP_DMA, &dma_map);
++ if (ret) {
++ RTE_LOG(ERR, EAL,
++ "\nErr: VFIO_IOMMU_MAP_DMA API Error %d.\n",
++ errno);
++ return ret;
++ }
++ printf("-----> dma_map.vaddr = 0x%llX\n", dma_map.vaddr);
++ }
++
++ /* TODO - This is a workaround: VFIO currently does not add the mapping
++ of the interrupt region to the SMMU. Remove this once the support
++ is added in the kernel.
++ */
++ vfio_map_irq_region(group);
++
++ return 0;
++}
++
++static int vfio_set_group(struct vfio_group *group, int groupid)
++{
++ char path[PATH_MAX];
++ struct vfio_group_status status = { .argsz = sizeof(status) };
++
++ /* Open the VFIO file corresponding to the IOMMU group */
++ snprintf(path, sizeof(path), "/dev/vfio/%d", groupid);
++
++ group->fd = open(path, O_RDWR);
++ if (group->fd < 0) {
++ RTE_LOG(ERR, EAL, "vfio: error opening %s\n", path);
++ return -1;
++ }
++
++ /* Test & Verify that group is VIABLE & AVAILABLE */
++ if (ioctl(group->fd, VFIO_GROUP_GET_STATUS, &status)) {
++ RTE_LOG(ERR, EAL, "vfio: error getting group status\n");
++ close(group->fd);
++ return -1;
++ }
++ if (!(status.flags & VFIO_GROUP_FLAGS_VIABLE)) {
++ RTE_LOG(ERR, EAL, "vfio: group not viable\n");
++ close(group->fd);
++ return -1;
++ }
++ /* Since Group is VIABLE, Store the groupid */
++ group->groupid = groupid;
++
++ /* Now connect this IOMMU group to given container */
++ if (vfio_connect_container(group)) {
++ RTE_LOG(ERR, EAL,
++ "vfio: error sonnecting container with group %d\n",
++ groupid);
++ close(group->fd);
++ return -1;
++ }
++
++ return 0;
++}
++
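++/*
++ * Set up VFIO for the DPRC container named by 'vfio_container': resolve its
++ * IOMMU group through sysfs, attach the group to a VFIO container, obtain
++ * the device fd for the DPRC and program the DMA (SMMU) mappings.
++ */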
++static int32_t setup_vfio_grp(char *vfio_container)
++{
++ char path[PATH_MAX];
++ char iommu_group_path[PATH_MAX], *group_name;
++ struct vfio_group *group = NULL;
++ struct stat st;
++ int groupid;
++ int ret, len, i;
++
++ printf("\tProcessing Container = %s\n", vfio_container);
++ snprintf(path, sizeof(path), "%s/%s", SYSFS_FSL_MC_DEVICES, vfio_container);
++ /* Check whether ls-container exists or not */
++ printf("\tcontainer device path = %s\n", path);
++ if (stat(path, &st) < 0) {
++ RTE_LOG(ERR, EAL, "vfio: Error (%d) getting FSL-MC device (%s)\n",
++ errno, path);
++ return -errno;
++ }
++
++ /* DPRC container exists. Now check its IOMMU group */
++ strncat(path, "/iommu_group", sizeof(path) - strlen(path) - 1);
++
++ len = readlink(path, iommu_group_path, PATH_MAX);
++ if (len == -1) {
++ RTE_LOG(ERR, EAL, "\tvfio: error no iommu_group for device\n");
++ RTE_LOG(ERR, EAL, "\t%s: len = %d, errno = %d\n",
++ path, len, errno);
++ return -errno;
++ }
++
++ iommu_group_path[len] = 0;
++ group_name = basename(iommu_group_path);
++ if (sscanf(group_name, "%d", &groupid) != 1) {
++ RTE_LOG(ERR, EAL, "\tvfio: error reading %s: %m\n", path);
++ return -errno;
++ }
++
++ RTE_LOG(INFO, EAL, "\tvfio: iommu group id = %d\n", groupid);
++
++ /* Check if group already exists */
++ for (i = 0; i < VFIO_MAX_GRP; i++) {
++ group = &vfio_groups[i];
++ if (group->groupid == groupid) {
++ RTE_LOG(ERR, EAL, "groupid already exists %d\n", groupid);
++ return 0;
++ }
++ }
++
++ if (vfio_set_group(group, groupid)) {
++ RTE_LOG(ERR, EAL, "group setup failure - %d\n", groupid);
++ return -ENODEV;
++ }
++
++ /* Get Device information */
++ ret = ioctl(group->fd, VFIO_GROUP_GET_DEVICE_FD, vfio_container);
++ if (ret < 0) {
++ RTE_LOG(ERR, EAL, "\tvfio: error getting device %s fd from group %d\n",
++ vfio_container, group->groupid);
++ return ret;
++ }
++ container_device_fd = ret;
++ RTE_LOG(INFO, EAL, "vfio: Container FD is [0x%X]\n", container_device_fd);
++ /* Set up SMMU */
++ ret = setup_dmamap();
++ if (ret) {
++ RTE_LOG(ERR, EAL, ": Setting dma map\n");
++ return ret;
++ }
++
++ return 0;
++}
++
++
++static int64_t vfio_map_mcp_obj(struct vfio_group *group, char *mcp_obj)
++{
++ int64_t v_addr = (int64_t)MAP_FAILED;
++ int32_t ret, mc_fd;
++
++ struct vfio_device_info d_info = { .argsz = sizeof(d_info) };
++ struct vfio_region_info reg_info = { .argsz = sizeof(reg_info) };
++
++ /* getting the mcp object's fd*/
++ mc_fd = ioctl(group->fd, VFIO_GROUP_GET_DEVICE_FD, mcp_obj);
++ if (mc_fd < 0) {
++ RTE_LOG(ERR, EAL, "vfio: error getting device %s fd from group %d\n",
++ mcp_obj, group->fd);
++ return v_addr;
++ }
++
++ /* getting device info*/
++ ret = ioctl(mc_fd, VFIO_DEVICE_GET_INFO, &d_info);
++ if (ret < 0) {
++ RTE_LOG(ERR, EAL, "vfio: error getting DEVICE_INFO\n");
++ goto MC_FAILURE;
++ }
++
++ /* getting device region info*/
++ ret = ioctl(mc_fd, VFIO_DEVICE_GET_REGION_INFO, &reg_info);
++ if (ret < 0) {
++ RTE_LOG(ERR, EAL, "vfio: error getting REGION_INFO\n");
++ goto MC_FAILURE;
++ }
++
++ RTE_LOG(INFO, EAL, "region offset = %llx , region size = %llx\n",
++ reg_info.offset, reg_info.size);
++
++ v_addr = (uint64_t)mmap(NULL, reg_info.size,
++ PROT_WRITE | PROT_READ, MAP_SHARED,
++ mc_fd, reg_info.offset);
++
++MC_FAILURE:
++ close(mc_fd);
++
++ return v_addr;
++}
++
++/* Fetch the list of MC devices available in the VFIO container and
++ * populate the private device list and other related data
++ * structures.
++ */
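++/*
++ * The group directory /sys/kernel/iommu_groups/<id>/devices holds one link
++ * per MC object, e.g. "dprc.2", "dpmcp.5", "dpni.1", "dpio.7", "dpbp.9"
++ * (object names here are only illustrative).  The dpmcp portal is mapped
++ * first; dpni/dpseci objects are then added to the PCI device list, while
++ * dpio and dpbp objects get dedicated create calls.
++ */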
++static int vfio_process_group_devices(void)
++{
++ struct vfio_device *vdev;
++ struct vfio_device_info device_info = { .argsz = sizeof(device_info) };
++ char *temp_obj, *object_type, *mcp_obj, *dev_name;
++ int32_t object_id, i, dev_fd, ret;
++ DIR *d;
++ struct dirent *dir;
++ char path[PATH_MAX];
++ int64_t v_addr;
++ int ndev_count;
++ struct vfio_group *group = &vfio_groups[0];
++
++ sprintf(path, "/sys/kernel/iommu_groups/%d/devices", group->groupid);
++
++ d = opendir(path);
++ if (!d) {
++ RTE_LOG(ERR, EAL,"Unable to open directory %s\n", path);
++ return -1;
++ }
++
++ /* Count the devices in the group and find the dpmcp object ID */
++ ndev_count = 0;
++ mcp_obj = NULL;
++ while ((dir = readdir(d)) != NULL) {
++ if (dir->d_type == DT_LNK) {
++ ndev_count++;
++ if (!strncmp("dpmcp", dir->d_name, 5)) {
++ if (mcp_obj)
++ free(mcp_obj);
++ mcp_obj = malloc(sizeof(dir->d_name));
++ if (!mcp_obj) {
++ RTE_LOG(ERR, EAL,
++ "Unable to allocate memory\n");
++ return -ENOMEM;
++ }
++ strcpy(mcp_obj, dir->d_name);
++ temp_obj = strtok(dir->d_name, ".");
++ temp_obj = strtok(NULL, ".");
++ sscanf(temp_obj, "%d", &mcp_id);
++ }
++ }
++ }
++ closedir(d);
++
++ if (!mcp_obj) {
++ RTE_LOG(ERR, EAL,"MCP Object not Found\n");
++ return -ENODEV;
++ }
++ RTE_LOG(INFO, EAL,"Total devices in conatiner = %d, MCP ID = %d\n",
++ ndev_count, mcp_id);
++
++ /* Allocate memory depending on the number of objects in the group */
++ group->vfio_device = (struct vfio_device *)malloc(ndev_count * sizeof(struct vfio_device));
++ if (!(group->vfio_device)) {
++ RTE_LOG(ERR, EAL,"Unable to allocate memory\n");
++ free(mcp_obj);
++ return -ENOMEM;
++ }
++
++ /* Allocate memory for MC Portal list */
++ mcp_ptr_list = malloc(sizeof(void *) * 1);
++ if (!mcp_ptr_list) {
++ RTE_LOG(ERR, EAL, "NO Memory!\n");
++ free(mcp_obj);
++ goto FAILURE;
++ }
++
++ v_addr = vfio_map_mcp_obj(group, mcp_obj);
++ free(mcp_obj);
++ if (v_addr == (int64_t)MAP_FAILED) {
++ RTE_LOG(ERR, EAL, "mapping region (errno = %d)\n", errno);
++ goto FAILURE;
++ }
++
++ RTE_LOG(INFO, EAL, "MC has VIR_ADD = 0x%ld\n", v_addr);
++
++ mcp_ptr_list[0] = (void *)v_addr;
++
++ d = opendir(path);
++ if (!d) {
++ RTE_LOG(ERR, EAL, "Directory %s not able to open\n", path);
++ goto FAILURE;
++ }
++
++ i = 0;
++ printf("\nDPAA2 - Parsing MC Device Objects:\n");
++ /* Parse each object and initialize it */
++ while ((dir = readdir(d)) != NULL) {
++ if (dir->d_type != DT_LNK)
++ continue;
++ if (!strncmp("dprc", dir->d_name, 4) || !strncmp("dpmcp", dir->d_name, 5))
++ continue;
++ dev_name = malloc(sizeof(dir->d_name));
++ if (!dev_name) {
++ RTE_LOG(ERR, EAL, "Unable to allocate memory\n");
++ goto FAILURE;
++ }
++ strcpy(dev_name, dir->d_name);
++ object_type = strtok(dir->d_name, ".");
++ temp_obj = strtok(NULL, ".");
++ sscanf(temp_obj, "%d", &object_id);
++ RTE_LOG(INFO, EAL, "%s ", dev_name);
++
++ /* getting the device fd*/
++ dev_fd = ioctl(group->fd, VFIO_GROUP_GET_DEVICE_FD, dev_name);
++ if (dev_fd < 0) {
++ RTE_LOG(ERR, EAL, "vfio getting device %s fd from group %d\n",
++ dev_name, group->fd);
++ free(dev_name);
++ goto FAILURE;
++ }
++
++ free(dev_name);
++ vdev = &group->vfio_device[group->object_index++];
++ vdev->fd = dev_fd;
++ vdev->index = i;
++ i++;
++ /* Get device information */
++ if (ioctl(vdev->fd, VFIO_DEVICE_GET_INFO, &device_info)) {
++ RTE_LOG(ERR, EAL, "VFIO_DEVICE_FSL_MC_GET_INFO failed\n");
++ goto FAILURE;
++ }
++
++ if (!strcmp(object_type, "dpni") ||
++ !strcmp(object_type, "dpseci")) {
++ struct rte_pci_device *dev;
++
++ dev = malloc(sizeof(struct rte_pci_device));
++ if (dev == NULL) {
++ return -1;
++ }
++ memset(dev, 0, sizeof(*dev));
++ /* store hw_id of dpni/dpseci device */
++ dev->addr.devid = object_id;
++ dev->id.vendor_id = FSL_VENDOR_ID;
++ dev->id.device_id = (strcmp(object_type, "dpseci"))?
++ FSL_MC_DPNI_DEVID: FSL_MC_DPSECI_DEVID;
++
++ TAILQ_INSERT_TAIL(&pci_device_list, dev, next);
++ }
++
++ if (!strcmp(object_type, "dpio")) {
++ dpaa2_create_dpio_device(vdev, &device_info, object_id);
++ }
++
++ if (!strcmp(object_type, "dpbp")) {
++ dpaa2_create_dpbp_device(object_id);
++ }
++ }
++ closedir(d);
++
++ ret = dpaa2_affine_qbman_swp();
++ if (ret)
++ RTE_LOG(ERR, EAL, "%s(): Err in affining qbman swp\n", __func__);
++
++ return 0;
++
++FAILURE:
++ free(group->vfio_device);
++ group->vfio_device = NULL;
++ return -1;
++}
++
++/*
++ * Check that the DPRC container named in the DPRC environment variable
++ * (e.g. "DPRC=dprc.2"; the name is illustrative) exists on the fsl-mc bus.
++ */
++static int
++fsl_mc_scan(void)
++{
++ char path[PATH_MAX];
++ struct stat st;
++
++ ls2bus_container = getenv("DPRC");
++
++ if (ls2bus_container == NULL) {
++ RTE_LOG(WARNING, EAL, "vfio container not set in env DPRC\n");
++ return -1;
++ }
++
++ snprintf(path, sizeof(path), "%s/%s", SYSFS_FSL_MC_DEVICES,
++ ls2bus_container);
++ /* Check whether LS-Container exists or not */
++ RTE_LOG(INFO, EAL, "\tcontainer device path = %s\n", path);
++ if (stat(path, &st) < 0) {
++ RTE_LOG(ERR, EAL, "vfio:fsl-mc device does not exists\n");
++ return -1;
++ }
++ return 0;
++}
++
++/* Initialize the FSL-MC (LS2) EAL subsystem */
++int
++rte_eal_dpaa2_init(void)
++{
++ if (fsl_mc_scan() < 0)
++ return -1;
++
++#ifdef VFIO_PRESENT
++ if (setup_vfio_grp(ls2bus_container)) {
++ RTE_LOG(ERR, EAL, "setup_vfio_grp\n");
++ return -1;
++ }
++ if (vfio_process_group_devices()) {
++ RTE_LOG(ERR, EAL, "vfio_process_group_devices\n");
++ return -1;
++ }
++#endif
++ return 0;
++}
+diff --git a/lib/librte_eal/linuxapp/eal/eal_vfio_fsl_mc.h b/lib/librte_eal/linuxapp/eal/eal_vfio_fsl_mc.h
+new file mode 100644
+index 0000000..7fc5ec6
+--- /dev/null
++++ b/lib/librte_eal/linuxapp/eal/eal_vfio_fsl_mc.h
+@@ -0,0 +1,102 @@
++/*-
++ * BSD LICENSE
++ *
++ * Copyright(c) 2014 Freescale Semiconductor. All rights reserved.
++ * All rights reserved.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions
++ * are met:
++ *
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in
++ * the documentation and/or other materials provided with the
++ * distribution.
++ * * Neither the name of Freescale Semiconductor nor the names of its
++ * contributors may be used to endorse or promote products derived
++ * from this software without specific prior written permission.
++ *
++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
++ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
++ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
++ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
++ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
++ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
++ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
++ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
++ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
++ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++#ifndef _EAL_VFIO_FSL_MC_H_
++#define _EAL_VFIO_FSL_MC_H_
++
++#include <rte_memory.h>
++#include <rte_mbuf.h>
++#include <rte_atomic.h>
++#include "eal_vfio.h"
++
++#define FSL_VENDOR_ID 0x1957
++#define FSL_MC_DPNI_DEVID 7
++#define FSL_MC_DPSECI_DEVID 3
++
++#define VFIO_MAX_GRP 1
++#define VFIO_MAX_CONTAINERS 1
++
++#define DPAA2_MBUF_HW_ANNOTATION 64
++#define DPAA2_FD_PTA_SIZE 64
++#define DPAA2_PACKET_LAYOUT_ALIGN 256
++#if (RTE_CACHE_LINE_SIZE == 128)
++#define DPAA2_RES 128
++#else
++#define DPAA2_RES 0
++#endif
++
++#define DPAA2_ALIGN_ROUNDUP(x, align) ((align) * (((x) + align - 1) / (align)))
++#define DPAA2_ALIGN_ROUNDUP_PTR(x, align)\
++ ((void *)DPAA2_ALIGN_ROUNDUP((uintptr_t)(x), (uintptr_t)(align)))
++
++typedef struct vfio_device {
++ int fd; /* VFIO device fd of the MC object */
++ int index; /* index of the child object */
++ struct vfio_device *child; /* Child object */
++} vfio_device;
++
++typedef struct vfio_group {
++ int fd; /* /dev/vfio/"groupid" */
++ int groupid;
++ struct vfio_container *container;
++ int object_index;
++ struct vfio_device *vfio_device;
++} vfio_group;
++
++typedef struct vfio_container {
++ int fd; /* /dev/vfio/vfio */
++ int used;
++ int index; /* index in group list */
++ struct vfio_group *group_list[VFIO_MAX_GRP];
++} vfio_container;
++
++int vfio_dmamap_mem_region(
++ uint64_t vaddr,
++ uint64_t iova,
++ uint64_t size);
++
++/* initialize the NXP/FSL dpaa2 accelerators */
++int rte_eal_dpaa2_init(void);
++
++int dpaa2_create_dpio_device(struct vfio_device *vdev,
++ struct vfio_device_info *obj_info,
++ int object_id);
++
++int dpaa2_create_dpbp_device(int dpbp_id);
++
++int dpaa2_affine_qbman_swp(void);
++
++int dpaa2_affine_qbman_swp_sec(void);
++
++#endif
++
+diff --git a/lib/librte_mbuf/Makefile b/lib/librte_mbuf/Makefile
+index 8d62b0d..92446d1 100644
+--- a/lib/librte_mbuf/Makefile
++++ b/lib/librte_mbuf/Makefile
+@@ -36,6 +36,10 @@ LIB = librte_mbuf.a
+
+ CFLAGS += $(WERROR_FLAGS) -I$(SRCDIR) -O3
+
++ifeq ($(CONFIG_RTE_LIBRTE_DPAA2_PMD),y)
++CFLAGS += -I$(RTE_SDK)/lib/librte_eal/linuxapp/eal
++endif
++
+ EXPORT_MAP := rte_mbuf_version.map
+
+ LIBABIVER := 2
+diff --git a/lib/librte_mbuf/rte_mbuf.c b/lib/librte_mbuf/rte_mbuf.c
+index dc0467c..c4009ee 100644
+--- a/lib/librte_mbuf/rte_mbuf.c
++++ b/lib/librte_mbuf/rte_mbuf.c
+@@ -60,6 +60,59 @@
+ #include <rte_hexdump.h>
+ #include <rte_errno.h>
+
++#ifdef RTE_LIBRTE_DPAA2_PMD
++
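++/*
++ * Weak no-op stubs for the hardware buffer-pool hooks.  When the DPAA2 PMD
++ * is linked it provides the real definitions; otherwise these log a warning
++ * and return -1 so callers can tell that no hardware pool support is
++ * present.
++ */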
++int __attribute__((weak))
++hw_mbuf_create_pool(
++struct rte_mempool __rte_unused *mp)
++{
++ RTE_LOG(WARNING, MBUF, "%s/n", __func__);
++ return -1;
++}
++
++int __attribute__((weak))
++hw_mbuf_init(
++ struct rte_mempool __rte_unused*mp,
++ void __rte_unused *_m)
++{
++ RTE_LOG(WARNING, MBUF, "%s/n", __func__);
++ return -1;
++}
++
++int __attribute__((weak))
++hw_mbuf_alloc(
++ struct rte_mempool __rte_unused *mp,
++ void __rte_unused **obj_p)
++{
++ RTE_LOG(WARNING, MBUF, "%s/n", __func__);
++ return -1;
++}
++
++int __attribute__((weak))
++hw_mbuf_free(void __rte_unused *m)
++{
++ RTE_LOG(WARNING, MBUF, "%s/n", __func__);
++ return -1;
++}
++
++int __attribute__((weak))
++hw_mbuf_alloc_bulk(struct rte_mempool __rte_unused *pool,
++ void __rte_unused **obj_table,
++ unsigned __rte_unused count)
++{
++ RTE_LOG(WARNING, MBUF, "%s/n", __func__);
++ return -1;
++}
++
++int __attribute__((weak))
++hw_mbuf_free_bulk(struct rte_mempool __rte_unused *mp,
++ void __rte_unused * const *obj_table,
++ unsigned __rte_unused n)
++{
++ RTE_LOG(WARNING, MBUF, "%s/n", __func__);
++ return -1;
++}
++#endif
+ /*
+ * ctrlmbuf constructor, given as a callback function to
+ * rte_mempool_create()
+@@ -106,6 +159,10 @@ rte_pktmbuf_pool_init(struct rte_mempool *mp, void *opaque_arg)
+
+ mbp_priv = rte_mempool_get_priv(mp);
+ memcpy(mbp_priv, user_mbp_priv, sizeof(*mbp_priv));
++#ifdef RTE_LIBRTE_DPAA2_PMD
++ if (mp->flags & MEMPOOL_F_HW_PKT_POOL)
++ hw_mbuf_create_pool(mp);
++#endif
+ }
+
+ /*
+@@ -122,6 +179,12 @@ rte_pktmbuf_init(struct rte_mempool *mp,
+ struct rte_mbuf *m = _m;
+ uint32_t mbuf_size, buf_len, priv_size;
+
++#ifdef RTE_LIBRTE_DPAA2_PMD
++ if (mp->flags & MEMPOOL_F_HW_PKT_POOL) {
++ if (hw_mbuf_init(mp, m) == 0)
++ return;
++ }
++#endif
+ priv_size = rte_pktmbuf_priv_size(mp);
+ mbuf_size = sizeof(struct rte_mbuf) + priv_size;
+ buf_len = rte_pktmbuf_data_room_size(mp);
+@@ -170,7 +233,11 @@ rte_pktmbuf_pool_create(const char *name, unsigned n,
+ return rte_mempool_create(name, n, elt_size,
+ cache_size, sizeof(struct rte_pktmbuf_pool_private),
+ rte_pktmbuf_pool_init, &mbp_priv, rte_pktmbuf_init, NULL,
++#if defined(RTE_LIBRTE_DPAA2_PMD)
++ socket_id, MEMPOOL_F_HW_PKT_POOL);
++#else
+ socket_id, 0);
++#endif
+ }
+
+ /* do some sanity checks on a mbuf: panic if it fails */
+diff --git a/lib/librte_mempool/Makefile b/lib/librte_mempool/Makefile
+index a6898ef..6116d52 100644
+--- a/lib/librte_mempool/Makefile
++++ b/lib/librte_mempool/Makefile
+@@ -36,6 +36,10 @@ LIB = librte_mempool.a
+
+ CFLAGS += $(WERROR_FLAGS) -I$(SRCDIR) -O3
+
++ifeq ($(CONFIG_RTE_LIBRTE_DPAA2_PMD),y)
++CFLAGS += -I$(RTE_SDK)/lib/librte_mbuf
++CFLAGS += -I$(RTE_SDK)/lib/librte_eal/linuxapp/eal
++endif
+ EXPORT_MAP := rte_mempool_version.map
+
+ LIBABIVER := 1
+diff --git a/lib/librte_mempool/rte_mempool.c b/lib/librte_mempool/rte_mempool.c
+index f8781e1..ac9595d 100644
+--- a/lib/librte_mempool/rte_mempool.c
++++ b/lib/librte_mempool/rte_mempool.c
+@@ -60,6 +60,10 @@
+
+ #include "rte_mempool.h"
+
++#ifdef RTE_LIBRTE_DPAA2_PMD
++#include "eal_vfio_fsl_mc.h"
++#endif
++
+ TAILQ_HEAD(rte_mempool_list, rte_tailq_entry);
+
+ static struct rte_tailq_elem rte_mempool_tailq = {
+@@ -316,6 +320,12 @@ rte_mempool_calc_obj_size(uint32_t elt_size, uint32_t flags,
+
+ /* this is the size of an object, including header and trailer */
+ sz->total_size = sz->header_size + sz->elt_size + sz->trailer_size;
++#ifdef RTE_LIBRTE_DPAA2_PMD
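++ /*
++ * Reserve extra room per object for the hardware frame annotation plus
++ * the pass-through annotation; with the values from eal_vfio_fsl_mc.h
++ * this is ROUNDUP(64 + 64, 256) = 256 bytes.
++ */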
++ if (flags & MEMPOOL_F_HW_PKT_POOL)
++ sz->total_size += DPAA2_ALIGN_ROUNDUP(
++ DPAA2_MBUF_HW_ANNOTATION + DPAA2_FD_PTA_SIZE,
++ DPAA2_PACKET_LAYOUT_ALIGN);
++#endif
+
+ return sz->total_size;
+ }
+@@ -590,6 +600,9 @@ rte_mempool_xmem_create(const char *name, unsigned n, unsigned elt_size,
+ mp->cache_size = cache_size;
+ mp->cache_flushthresh = CALC_CACHE_FLUSHTHRESH(cache_size);
+ mp->private_data_size = private_data_size;
++#ifdef RTE_LIBRTE_DPAA2_PMD
++ mp->offload_ptr = UINTPTR_MAX;
++#endif
+
+ /* calculate address of the first element for continuous mempool. */
+ obj = (char *)mp + MEMPOOL_HEADER_SIZE(mp, pg_num) +
+diff --git a/lib/librte_mempool/rte_mempool.h b/lib/librte_mempool/rte_mempool.h
+index 9745bf0..304a434 100644
+--- a/lib/librte_mempool/rte_mempool.h
++++ b/lib/librte_mempool/rte_mempool.h
+@@ -215,7 +215,10 @@ struct rte_mempool {
+ uintptr_t elt_va_end;
+ /**< Virtual address of the <size + 1> mempool object. */
+ phys_addr_t elt_pa[MEMPOOL_PG_NUM_DEFAULT];
+ /**< Array of physical page addresses for the mempool objects buffer. */
++#ifdef RTE_LIBRTE_DPAA2_PMD
++ uintptr_t offload_ptr; /**< Reserved for HW-offloaded (DPAA2) pool support. */
++#endif
+
+ } __rte_cache_aligned;
+
+@@ -223,7 +226,18 @@ struct rte_mempool {
+ #define MEMPOOL_F_NO_CACHE_ALIGN 0x0002 /**< Do not align objs on cache lines.*/
+ #define MEMPOOL_F_SP_PUT 0x0004 /**< Default put is "single-producer".*/
+ #define MEMPOOL_F_SC_GET 0x0008 /**< Default get is "single-consumer".*/
+-
++#ifdef RTE_LIBRTE_DPAA2_PMD
++#define MEMPOOL_F_HW_PKT_POOL 0x0010 /**< HW offload for packet buffer management. */
++
++int hw_mbuf_create_pool(struct rte_mempool *mp);
++int hw_mbuf_init(struct rte_mempool *mp, void *_m);
++int hw_mbuf_alloc(struct rte_mempool *mp, void **obj_p);
++int hw_mbuf_free(void *_m);
++int hw_mbuf_alloc_bulk(struct rte_mempool *pool,
++ void **obj_table, unsigned count);
++int hw_mbuf_free_bulk(struct rte_mempool *mp, void * const *obj_table,
++ unsigned n);
++#endif
+ /**
+ * @internal When debug is enabled, store some statistics.
+ *
+@@ -877,6 +891,12 @@ static inline void __attribute__((always_inline))
+ rte_mempool_put_bulk(struct rte_mempool *mp, void * const *obj_table,
+ unsigned n)
+ {
++#ifdef RTE_LIBRTE_DPAA2_PMD
++ if (mp->flags & MEMPOOL_F_HW_PKT_POOL) {
++ if (hw_mbuf_free_bulk(mp, obj_table, n) == 0)
++ return;
++ }
++#endif
+ __mempool_check_cookies(mp, obj_table, n, 0);
+ __mempool_put_bulk(mp, obj_table, n, !(mp->flags & MEMPOOL_F_SP_PUT));
+ }
+@@ -1091,6 +1111,14 @@ static inline int __attribute__((always_inline))
+ rte_mempool_get_bulk(struct rte_mempool *mp, void **obj_table, unsigned n)
+ {
+ int ret;
++
++#ifdef RTE_LIBRTE_DPAA2_PMD
++ if (mp->flags & MEMPOOL_F_HW_PKT_POOL) {
++ ret = hw_mbuf_alloc_bulk(mp, obj_table, n);
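++ /*
++ * A return value above -2 (success or -1 from the HW pool) is
++ * passed through as-is; anything lower falls through to the
++ * software ring path below.
++ */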
++ if (ret > -2)
++ return ret;
++ }
++#endif
+ ret = __mempool_get_bulk(mp, obj_table, n,
+ !(mp->flags & MEMPOOL_F_SC_GET));
+ if (ret == 0)
+diff --git a/mk/machine/dpaa2/rte.vars.mk b/mk/machine/dpaa2/rte.vars.mk
+new file mode 100644
+index 0000000..8541633
+--- /dev/null
++++ b/mk/machine/dpaa2/rte.vars.mk
+@@ -0,0 +1,60 @@
++# BSD LICENSE
++#
++# Copyright(c) 2016 Freescale Semiconductor, Inc. All rights reserved.
++#
++# Redistribution and use in source and binary forms, with or without
++# modification, are permitted provided that the following conditions
++# are met:
++#
++# * Redistributions of source code must retain the above copyright
++# notice, this list of conditions and the following disclaimer.
++# * Redistributions in binary form must reproduce the above copyright
++# notice, this list of conditions and the following disclaimer in
++# the documentation and/or other materials provided with the
++# distribution.
++# * Neither the name of Freescale Semiconductor nor the names of its
++# contributors may be used to endorse or promote products derived
++# from this software without specific prior written permission.
++#
++# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
++# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
++# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
++# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
++# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
++# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
++# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
++# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
++# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
++# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++
++#
++# machine:
++#
++# - can define ARCH variable (overridden by cmdline value)
++# - can define CROSS variable (overridden by cmdline value)
++# - define MACHINE_CFLAGS variable (overridden by cmdline value)
++# - define MACHINE_LDFLAGS variable (overridden by cmdline value)
++# - define MACHINE_ASFLAGS variable (overridden by cmdline value)
++# - can define CPU_CFLAGS variable (overridden by cmdline value) that
++# overrides the one defined in arch.
++# - can define CPU_LDFLAGS variable (overridden by cmdline value) that
++# overrides the one defined in arch.
++# - can define CPU_ASFLAGS variable (overridden by cmdline value) that
++# overrides the one defined in arch.
++# - may override any previously defined variable
++#
++
++# ARCH =
++# CROSS =
++# MACHINE_CFLAGS =
++# MACHINE_LDFLAGS =
++# MACHINE_ASFLAGS =
++# CPU_CFLAGS =
++# CPU_LDFLAGS =
++# CPU_ASFLAGS =
++MACHINE_CFLAGS += -march=armv8-a
++
++ifdef CONFIG_RTE_ARCH_ARM_TUNE
++MACHINE_CFLAGS += -mcpu=$(CONFIG_RTE_ARCH_ARM_TUNE)
++endif
+diff --git a/mk/rte.app.mk b/mk/rte.app.mk
+index c66e491..ee25ba3 100644
+--- a/mk/rte.app.mk
++++ b/mk/rte.app.mk
+@@ -125,6 +125,7 @@ _LDLIBS-$(CONFIG_RTE_LIBRTE_CFGFILE) += -lrte_cfgfile
+ _LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_BOND) += -lrte_pmd_bond
+
+ _LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_XENVIRT) += -lrte_pmd_xenvirt
++_LDLIBS-$(CONFIG_RTE_LIBRTE_DPAA2_PMD) += -lrte_pmd_dpaa2
+
+ ifeq ($(CONFIG_RTE_BUILD_SHARED_LIB),n)
+ # plugins (link only if static libraries)
+--
+2.5.0
+